Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- testbed/pgmpy__pgmpy/.codacy.yaml +5 -0
- testbed/pgmpy__pgmpy/.gitignore +44 -0
- testbed/pgmpy__pgmpy/.gitmodules +0 -0
- testbed/pgmpy__pgmpy/.pre-commit-config.yaml +16 -0
- testbed/pgmpy__pgmpy/.test_durations +0 -0
- testbed/pgmpy__pgmpy/AUTHORS.rst +119 -0
- testbed/pgmpy__pgmpy/CHANGELOG.md +293 -0
- testbed/pgmpy__pgmpy/CITATION.cff +23 -0
- testbed/pgmpy__pgmpy/CODE_OF_CONDUCT.md +76 -0
- testbed/pgmpy__pgmpy/Contributing.md +106 -0
- testbed/pgmpy__pgmpy/LICENSE +20 -0
- testbed/pgmpy__pgmpy/MANIFEST.in +1 -0
- testbed/pgmpy__pgmpy/README.md +113 -0
- testbed/pgmpy__pgmpy/examples/Creating a Linear Gaussian Bayesian Network.ipynb +0 -0
- testbed/pgmpy__pgmpy/examples/Linear Gaussian Bayesian Network.ipynb +54 -0
- testbed/pgmpy__pgmpy/examples/Monty Hall Problem.ipynb +246 -0
- testbed/pgmpy__pgmpy/funding.json +52 -0
- testbed/pgmpy__pgmpy/meta.yaml +42 -0
- testbed/pgmpy__pgmpy/pgmpy/__init__.py +4 -0
- testbed/pgmpy__pgmpy/pgmpy/base/DAG.py +1319 -0
- testbed/pgmpy__pgmpy/pgmpy/base/UndirectedGraph.py +304 -0
- testbed/pgmpy__pgmpy/pgmpy/base/__init__.py +4 -0
- testbed/pgmpy__pgmpy/pgmpy/estimators/EM.py +295 -0
- testbed/pgmpy__pgmpy/pgmpy/estimators/ExhaustiveSearch.py +192 -0
- testbed/pgmpy__pgmpy/pgmpy/estimators/GES.py +240 -0
- testbed/pgmpy__pgmpy/pgmpy/estimators/HillClimbSearch.py +351 -0
- testbed/pgmpy__pgmpy/pgmpy/estimators/LinearModel.py +33 -0
- testbed/pgmpy__pgmpy/pgmpy/estimators/MLE.py +293 -0
- testbed/pgmpy__pgmpy/pgmpy/estimators/MirrorDescentEstimator.py +231 -0
- testbed/pgmpy__pgmpy/pgmpy/estimators/MmhcEstimator.py +217 -0
- testbed/pgmpy__pgmpy/pgmpy/estimators/SEMEstimator.py +451 -0
- testbed/pgmpy__pgmpy/pgmpy/estimators/TreeSearch.py +390 -0
- testbed/pgmpy__pgmpy/pgmpy/estimators/__init__.py +62 -0
- testbed/pgmpy__pgmpy/pgmpy/estimators/base.py +432 -0
- testbed/pgmpy__pgmpy/pgmpy/estimators/expert.py +207 -0
- testbed/pgmpy__pgmpy/pgmpy/extern/__init__.py +3 -0
- testbed/pgmpy__pgmpy/pgmpy/extern/tabulate.py +970 -0
- testbed/pgmpy__pgmpy/pgmpy/factors/FactorDict.py +75 -0
- testbed/pgmpy__pgmpy/pgmpy/factors/FactorSet.py +385 -0
- testbed/pgmpy__pgmpy/pgmpy/factors/__init__.py +13 -0
- testbed/pgmpy__pgmpy/pgmpy/factors/base.py +169 -0
- testbed/pgmpy__pgmpy/pgmpy/factors/continuous/ContinuousFactor.py +461 -0
- testbed/pgmpy__pgmpy/pgmpy/factors/continuous/LinearGaussianCPD.py +246 -0
- testbed/pgmpy__pgmpy/pgmpy/factors/continuous/__init__.py +12 -0
- testbed/pgmpy__pgmpy/pgmpy/factors/continuous/discretize.py +255 -0
- testbed/pgmpy__pgmpy/pgmpy/factors/discrete/CPD.py +677 -0
- testbed/pgmpy__pgmpy/pgmpy/factors/discrete/DiscreteFactor.py +1043 -0
- testbed/pgmpy__pgmpy/pgmpy/factors/discrete/JointProbabilityDistribution.py +404 -0
- testbed/pgmpy__pgmpy/pgmpy/factors/discrete/__init__.py +5 -0
- testbed/pgmpy__pgmpy/pgmpy/factors/distributions/CanonicalDistribution.py +614 -0
testbed/pgmpy__pgmpy/.codacy.yaml
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exclude_paths:
|
| 2 |
+
- '**.md'
|
| 3 |
+
- 'pgmpy/tests/**'
|
| 4 |
+
- 'docs/**'
|
| 5 |
+
- 'pgmpy/extern/**'
|
testbed/pgmpy__pgmpy/.gitignore
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.py[cod]
|
| 2 |
+
*.swp
|
| 3 |
+
.idea/*
|
| 4 |
+
# C extensions
|
| 5 |
+
*.so
|
| 6 |
+
|
| 7 |
+
# Packages
|
| 8 |
+
*.egg
|
| 9 |
+
*.egg-info
|
| 10 |
+
dist
|
| 11 |
+
build
|
| 12 |
+
_build
|
| 13 |
+
eggs
|
| 14 |
+
parts
|
| 15 |
+
bin
|
| 16 |
+
var
|
| 17 |
+
sdist
|
| 18 |
+
develop-eggs
|
| 19 |
+
.installed.cfg
|
| 20 |
+
lib
|
| 21 |
+
lib64
|
| 22 |
+
|
| 23 |
+
# Installer logs
|
| 24 |
+
pip-log.txt
|
| 25 |
+
|
| 26 |
+
# Unit test / coverage reports
|
| 27 |
+
.coverage
|
| 28 |
+
.tox
|
| 29 |
+
nosetests.xml
|
| 30 |
+
|
| 31 |
+
# Translations
|
| 32 |
+
*.mo
|
| 33 |
+
|
| 34 |
+
# Mr Developer
|
| 35 |
+
.mr.developer.cfg
|
| 36 |
+
.project
|
| 37 |
+
.pydevproject
|
| 38 |
+
|
| 39 |
+
# Auto examples generated
|
| 40 |
+
docs/auto_examples/*
|
| 41 |
+
docs/examples/*
|
| 42 |
+
|
| 43 |
+
# Macos files
|
| 44 |
+
.DS_Store
|
testbed/pgmpy__pgmpy/.gitmodules
ADDED
|
File without changes
|
testbed/pgmpy__pgmpy/.pre-commit-config.yaml
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
repos:
|
| 2 |
+
- repo: https://github.com/pre-commit/pre-commit-hooks
|
| 3 |
+
rev: v4.6.0
|
| 4 |
+
hooks:
|
| 5 |
+
- id: check-yaml
|
| 6 |
+
- id: end-of-file-fixer
|
| 7 |
+
- id: trailing-whitespace
|
| 8 |
+
- repo: https://github.com/pycqa/isort
|
| 9 |
+
rev: 5.13.2
|
| 10 |
+
hooks:
|
| 11 |
+
- id: isort
|
| 12 |
+
args: ["--profile", "black", "--filter-files"]
|
| 13 |
+
- repo: https://github.com/psf/black
|
| 14 |
+
rev: 24.8.0
|
| 15 |
+
hooks:
|
| 16 |
+
- id: black
|
testbed/pgmpy__pgmpy/.test_durations
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
testbed/pgmpy__pgmpy/AUTHORS.rst
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
People
|
| 2 |
+
------
|
| 3 |
+
|
| 4 |
+
Organisation Admins
|
| 5 |
+
===================
|
| 6 |
+
|
| 7 |
+
* Ankur Ankan <ankurankan@gmail.com>
|
| 8 |
+
|
| 9 |
+
* Abinash Panda <mailme.abinashpanda@gmail.com>
|
| 10 |
+
|
| 11 |
+
Contributors
|
| 12 |
+
============
|
| 13 |
+
|
| 14 |
+
* Ankur Ankan <ankurankan@gmail.com>
|
| 15 |
+
* Abinash Panda <abinash.panda.ece10@itbhu.ac.in>
|
| 16 |
+
* Yashu Seth <yashuseth2503@gmail.com>
|
| 17 |
+
* palashahuja <abhor902@gmail.com>
|
| 18 |
+
* Utkarsh Gupta <utkarsh.gupta550@gmail.com>
|
| 19 |
+
* Vivek Jain <vivek425ster@gmail.com>
|
| 20 |
+
* Utkarsh <utkarsh.gupta550@gmail.com>
|
| 21 |
+
* Pratyaksh Sharma <pratyaksh@me.com>
|
| 22 |
+
* ankurankan <ankurankan@gmail.com>
|
| 23 |
+
* chrisittner <mail@chrisittner.de>
|
| 24 |
+
* navin <navinchandak92@gmail.com>
|
| 25 |
+
* Abinash Panda <mailme.abinashpanda@gmail.com>
|
| 26 |
+
* kislayabhi <abhijeetkislay@gmail.com>
|
| 27 |
+
* finn42 <finnlattimore@gmail.com>
|
| 28 |
+
* Raghav Gupta <raghavg7796@gmail.com>
|
| 29 |
+
* abinashpanda <abinash.panda.ece10@itbhu.ac.in>
|
| 30 |
+
* joncrall <erotemic@gmail.com>
|
| 31 |
+
* lohani2280 <lohani.ayush01@gmail.com>
|
| 32 |
+
* Anavil Tripathi <anaviltripathi@gmail.com>
|
| 33 |
+
* jp111 <jaspreetsingh112@gmail.com>
|
| 34 |
+
* snigam3112 <snigam3112@gmail.com>
|
| 35 |
+
* Ashwini Chaudhary <monty.sinngh@gmail.com>
|
| 36 |
+
* Kshitij Saraogi <KshitijSaraogi@gmail.com>
|
| 37 |
+
* nehasoni <neha.soni.ece10@itbhu.ac.in>
|
| 38 |
+
* Christian Ittner <mail@chrisittner.de>
|
| 39 |
+
* Harish Kashyap <harish.k.kashyap@gmail.com>
|
| 40 |
+
* cs15mtech11007@iith.ac.in <krishankant.singh@suiit.ac.in>
|
| 41 |
+
* Shikhar Nigam <snigam3112@gmail.com>
|
| 42 |
+
* Zhongpeng Lin <zholin@microsoft.com>
|
| 43 |
+
* loudly-soft <hipnet@yahoo.com>
|
| 44 |
+
* Pratik151 <pratikpatel15133@gmail.com>
|
| 45 |
+
* Simon Brugman <sbrugman@users.noreply.github.com>
|
| 46 |
+
* Sitesh Ranjan <siteshjaiswal@gmail.com>
|
| 47 |
+
* Zhongpeng Lin <lin.zhp@gmail.com>
|
| 48 |
+
* Anchit Jain <anchitjain1234@gmail.com>
|
| 49 |
+
* Demyanov <artyom.demyanov96@gmail.com>
|
| 50 |
+
* Justin Tervala <Tervala_Justin@bah.com>
|
| 51 |
+
* Pratyaksh <pratyaksh@me.com>
|
| 52 |
+
* Utkarsh Sinha <sinha.utkarsh1990@gmail.com>
|
| 53 |
+
* abhi95 <abhi.r.j95@gmail.com>
|
| 54 |
+
* mailman_anchit <mailman@anchitja.in>
|
| 55 |
+
* Alaa ElNouby <alaaelnouby@gmail.com>
|
| 56 |
+
* Max Baak <maxbaak@gmail.com>
|
| 57 |
+
* Melissa Turcotte <meturcot@microsoft.com>
|
| 58 |
+
* elkbrsathuji <elkana.baris@mail.huji.ac.il>
|
| 59 |
+
* karttikeya <mangalam@iitk.ac.in>
|
| 60 |
+
* Alex Perusse <perusse.a@gmail.com>
|
| 61 |
+
* Ashutosh-Adhikari <aashu.ad@gmail.com>
|
| 62 |
+
* Ayan Das <dasayan05@hotmail.com>
|
| 63 |
+
* Daan Knoope <daanknoope@gmail.com>
|
| 64 |
+
* Jon Crall <erotemic@gmail.com>
|
| 65 |
+
* Marco Tamassia <tamassia.marco@gmail.com>
|
| 66 |
+
* Pratyaksh Sharma <pratyakshs@users.noreply.github.com>
|
| 67 |
+
* Randy Julian <rkjulian@indigobio.com>
|
| 68 |
+
* Tristan Deleu <tristandeleu@users.noreply.github.com>
|
| 69 |
+
* all3fox <all3fox@gmail.com>
|
| 70 |
+
* heldmo <52459329+heldmo@users.noreply.github.com>
|
| 71 |
+
* joncrall <crallj@rpi.edu>
|
| 72 |
+
* sudarshan <ssudk1896@gmail.com>
|
| 73 |
+
* vivek425ster <vivek425ster@gmail.com>
|
| 74 |
+
* Alireza Mehrtash <alireza.mehrtash@gmail.com>
|
| 75 |
+
* Arijit-hydrated <parijit10@gmail.com>
|
| 76 |
+
* Bhavya Bahl <bhavyabahl.1@gmail.com>
|
| 77 |
+
* BrettW <brettwitty@brettwitty.net>
|
| 78 |
+
* Chris Kamphuis <mail@chriskamphuis.com>
|
| 79 |
+
* Céline Comte <celine.comte@nokia.com>
|
| 80 |
+
* Deepak Garg <deepakgargi99e@gmail.com>
|
| 81 |
+
* Fabio Colella <fcole90@gmail.com>
|
| 82 |
+
* IvanaXu <1440420407@qq.com>
|
| 83 |
+
* Jaidev Deshpande <deshpande.jaidev@gmail.com>
|
| 84 |
+
* Jinyan Guan <jig189@ucsd.edu>
|
| 85 |
+
* Joan Zheng <jzheng@sift.net>
|
| 86 |
+
* Jon Badger <jonathancbadger@gmail.com>
|
| 87 |
+
* JustinTervala <Tervala_Justin@bah.com>
|
| 88 |
+
* Kris Singh <krishnakant.singh@suiit.ac.in>
|
| 89 |
+
* Lorenzo Mario Amorosa <38985168+Lostefra@users.noreply.github.com>
|
| 90 |
+
* Marcus Östling <mpt.ostling@gmail.com>
|
| 91 |
+
* Maximilian Kurthen <MKurthen@live.de>
|
| 92 |
+
* Nishant Nikhil <nishantiam@gmail.com>
|
| 93 |
+
* Paul Chaignon <paul.chaignon@orange.com>
|
| 94 |
+
* Rafael Ballester-Ripoll <rballri@gmail.com>
|
| 95 |
+
* Randy <sharmane578@gmail.com>
|
| 96 |
+
* Roy Abitbol <roy.abitbol@gmail.com>
|
| 97 |
+
* Sahithi Kilaru <Sahithi Kilaru>
|
| 98 |
+
* Saket Choudhary <saketkc@gmail.com>
|
| 99 |
+
* Sandeep Narayanaswami <Sandeep.Narayanaswami@capitalone.com>
|
| 100 |
+
* Sandeep Narayanaswami <scoffes@gmail.com>
|
| 101 |
+
* Sandhya Kumari <sandhya05jan@gmail.com>
|
| 102 |
+
* Steffen Roecker <sroecker@gmail.com>
|
| 103 |
+
* The Gitter Badger <badger@gitter.im>
|
| 104 |
+
* Ubuntu <ubuntu@ip-172-31-32-148.us-west-2.compute.internal>
|
| 105 |
+
* William Lyon <wlyon@quantpost.com>
|
| 106 |
+
* Yi(Amy) Sui <suiyiamy@gmail.com>
|
| 107 |
+
* Yichuan <63798002+yzh211@users.noreply.github.com>
|
| 108 |
+
* Yidi Huang <huangy6@yahoo.com>
|
| 109 |
+
* abinash <abinash@abinash-Inspiron-N4010.(none)>
|
| 110 |
+
* davenza <daries100@gmail.com>
|
| 111 |
+
* fabriziov <venfab@gmail.com>
|
| 112 |
+
* julian.reichinger <julian.reichinger@dynatrace.com>
|
| 113 |
+
* mjmt05 <melissa.turcotte@gmail.com>
|
| 114 |
+
* njwhite <n.j.white@gmail.com>
|
| 115 |
+
* pdoongarwal <pdoongarwal@gmail.com>
|
| 116 |
+
* pheman <rbzhang100@gmail.com>
|
| 117 |
+
* rishabmarkand <rishab.markand@gmail.com>
|
| 118 |
+
* sleepy-owl <sleepy-owl@github.com>
|
| 119 |
+
* yashml <yashml@outlook.com>
|
testbed/pgmpy__pgmpy/CHANGELOG.md
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Changelog
|
| 2 |
+
All notable changes to this project will be documented in this file.
|
| 3 |
+
|
| 4 |
+
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
| 5 |
+
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
| 6 |
+
|
| 7 |
+
## [0.1.26] - 2024-08-09
|
| 8 |
+
### Added
|
| 9 |
+
1. Support for returning Belief Propagation messages in Factor Graph BP.
|
| 10 |
+
2. Maximum Likelihood Estimator for Junction Tree.
|
| 11 |
+
3. Adds a simple discretization method: `pgmpy.utils.discretize`.
|
| 12 |
+
4. Two new metrics for model testing: `pgmpy.metrics.implied_cis` and `pgmpy.metrics.fisher_c`.
|
| 13 |
+
5. Support for Linear Gaussian Bayesian Networks: estimation, prediction, simulation and random model generation.
|
| 14 |
+
7. New mixed data Conditional Independence test based on canonical correlations.
|
| 15 |
+
8. New LLM based structure learning / causal discovery algorithm. Also LLM based pairwise variable orientation method.
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
### Fixed
|
| 19 |
+
1. Reading and Writing from XBN file format.
|
| 20 |
+
2. Documentation for plotting models.
|
| 21 |
+
3. Fixes PC algorithm to add disconnected nodes in the final model.
|
| 22 |
+
4. Allows `.` in variables names in BIF file format.
|
| 23 |
+
|
| 24 |
+
### Changed
|
| 25 |
+
1. Allows `virtual_evidence` parameter in inference methods to accept DiscreteFactor objects.
|
| 26 |
+
|
| 27 |
+
## [0.1.25] - 2024-03-08
|
| 28 |
+
### Added
|
| 29 |
+
1. `init_cpds` argument to `ExpecattionMaximiation.get_parameters` to specify initialization values.
|
| 30 |
+
2. BeliefPropagation with message passing for Factor Graphs.
|
| 31 |
+
3. Marginal Inference for undirected graphs.
|
| 32 |
+
|
| 33 |
+
### Fixed
|
| 34 |
+
1. Incompatibality with networkx==3.2.
|
| 35 |
+
2. `CausalInference.get_minimal_adjustment_set` to accept string variable names.
|
| 36 |
+
3. Bug in EM when latent varaibles are present.
|
| 37 |
+
4. `compat_fns.copy` to consider the case when int or float is passed.
|
| 38 |
+
5. Fixes issue with `BayesianNetwork.fit_update` when running with CUDA backend.
|
| 39 |
+
|
| 40 |
+
### Changed
|
| 41 |
+
1. Documentation Updates
|
| 42 |
+
2. Optimizations for Hill Climb Search algorithm.
|
| 43 |
+
3. Tests shutdown parallel workers in teardown.
|
| 44 |
+
4. Removes the `complete_samples_only` argument from `BaseEstimator.state_counts`.
|
| 45 |
+
5. Default number of cores to use changed to 1 for parameter estimation methods.
|
| 46 |
+
|
| 47 |
+
## [0.1.24] - 2023-06-30
|
| 48 |
+
### Added
|
| 49 |
+
1. Added support for python 3.11.
|
| 50 |
+
2. Adds `DAG.to_graphviz` and `PDAG.to_graphviz` methods to convert model to graphviz objects.
|
| 51 |
+
3. Adds pytorch as an alternative backend.
|
| 52 |
+
4. Adds unicode support for BIFReader.
|
| 53 |
+
|
| 54 |
+
### Fixed
|
| 55 |
+
1. Warnings use a logger instance.
|
| 56 |
+
2. Fixes documentation.
|
| 57 |
+
3. Fixes variables name arguments for `CausalInference.get_minimal_adjustment_set`
|
| 58 |
+
|
| 59 |
+
### Changed
|
| 60 |
+
1. Adds argument to specify samples for ApproxInference.
|
| 61 |
+
2. Memory optimizations for computing structure scores.
|
| 62 |
+
3. Switches joblib backed to loky.
|
| 63 |
+
4. Runtime optimizations for sampling.
|
| 64 |
+
5. Runtime optimizations for Variable Elimination.
|
| 65 |
+
6. All config variables moved to `pgmpy.global_vars`.
|
| 66 |
+
|
| 67 |
+
## [0.1.23] - 2023-06-30
|
| 68 |
+
### Added
|
| 69 |
+
1. BIFReader made compatible with the output of PyAgrum
|
| 70 |
+
2. Support for all available CI tests in PC algorithm.
|
| 71 |
+
3. References for read/write file formats.
|
| 72 |
+
|
| 73 |
+
### Removed
|
| 74 |
+
1. Removes `DAG.to_pdag` method.
|
| 75 |
+
|
| 76 |
+
### Changed
|
| 77 |
+
1. Fixes for ApproxInference for DBNs.
|
| 78 |
+
2. Make `xml.etree` the default parser instead of using lxml.
|
| 79 |
+
|
| 80 |
+
## [0.1.22] - 2023-04-08
|
| 81 |
+
### Added
|
| 82 |
+
1. AIC score metric from score based structure learning.
|
| 83 |
+
2. Adds support for NET (HUGIN) file format.
|
| 84 |
+
3. Adds argument reindex to `state_counts` method.
|
| 85 |
+
|
| 86 |
+
### Fixed
|
| 87 |
+
1. Bug in GibbsSampling when sampling from Bayesian Networks.
|
| 88 |
+
2. Fix seed for all simulation methods.
|
| 89 |
+
3. Memory leaks when using `lru_cache`.
|
| 90 |
+
|
| 91 |
+
### Changed
|
| 92 |
+
1. Caching disabled for computing state name counts during structure learning.
|
| 93 |
+
2. Pre-computation for sampling methods are optimized.
|
| 94 |
+
|
| 95 |
+
## [0.1.21] - 2022-12-31
|
| 96 |
+
### Added
|
| 97 |
+
1. `BayesianNetwork.get_state_probability` method to compute the probability of a given evidence.
|
| 98 |
+
2. `BayesianEstimator.estimate_cpd` accepts weighted datasets.
|
| 99 |
+
|
| 100 |
+
### Fixed
|
| 101 |
+
1. Fixes bug in `CausalInference.estimate_ate` with front-door criterion.
|
| 102 |
+
2. Fixes inference bugs when variable has a single state.
|
| 103 |
+
|
| 104 |
+
## [0.1.20] - 2022-09-30
|
| 105 |
+
### Added
|
| 106 |
+
1. `BayesianNetwork.get_random_cpds` method to randomly parameterize a network structure.
|
| 107 |
+
2. Faster Variable Elimination using tensor contraction.
|
| 108 |
+
3. `factors.factor_sum_product` method for faster sum-product operations using tensor contraction.
|
| 109 |
+
|
| 110 |
+
### Fixed
|
| 111 |
+
1. Bug in `DynamicBayesianNetwork.initialize_initial_state`. #1564
|
| 112 |
+
2. Bug in `factors.factor_product`. #1565
|
| 113 |
+
|
| 114 |
+
### Changed
|
| 115 |
+
1. Runtime improvements in `DiscreteFactor.marginalize` and `DiscreteFactor.copy` methods.
|
| 116 |
+
|
| 117 |
+
## [0.1.19] - 2022-06-30
|
| 118 |
+
### Added
|
| 119 |
+
1. Adds checks for arguments to `BayesianNetwork.simulate` method.
|
| 120 |
+
|
| 121 |
+
### Fixed
|
| 122 |
+
1. Fixes TAN algorithm to use conditional information metric.
|
| 123 |
+
2. Speed ups for all estimation and inference methods.
|
| 124 |
+
3. Fix in stable variant of PC algorithm to give reproducible results.
|
| 125 |
+
4. Fix in `GibbsSampling` for it to work with variables with integral names.
|
| 126 |
+
5. `DAG.active_trail_nodes` allows tuples as variable names.
|
| 127 |
+
6. Fixes CPD and edge creation in `UAIReader`.
|
| 128 |
+
|
| 129 |
+
## [0.1.18] - 2022-03-30
|
| 130 |
+
### Fixed
|
| 131 |
+
1. Fixes `CausalInference.is_valid_backdoor_adjustment_set` to accept str arguments for `Z`.
|
| 132 |
+
2. Fixes `BayesianNetwork.remove_cpd` to work with integral node names.
|
| 133 |
+
3. Fixes `MPLP.map_query` to return the variable states instead of probability values.
|
| 134 |
+
4. Fixes BIFWriter to generate output in standard BIF format.
|
| 135 |
+
|
| 136 |
+
## [0.1.17] - 2021-12-30
|
| 137 |
+
### Added
|
| 138 |
+
1. Adds BayesianNetwork.states property to store states of all the variables.
|
| 139 |
+
2. Adds extra checks in check model for state names
|
| 140 |
+
|
| 141 |
+
### Fixed
|
| 142 |
+
1. Fixes typos in BayesianModel deprecation warning
|
| 143 |
+
2. Bug fix in printing Linear Gaussian CPD
|
| 144 |
+
3. Update example notebooks to work on latest dev.
|
| 145 |
+
|
| 146 |
+
## [0.1.16] - 2021-09-30
|
| 147 |
+
### Added
|
| 148 |
+
1. Adds a `fit_update` method to `BayesianNetwork` for updating model using new data.
|
| 149 |
+
2. Adds `simulate` method to `BayesianNetwork` and `DynamicBayesianNetwork` to simulated data under different conditions.
|
| 150 |
+
3. Adds `DynamicBayesianNetwork.fit` method to learn model paramters from data.
|
| 151 |
+
4. `ApproxInference` class to do approximate inference on models using sampling.
|
| 152 |
+
5. Robust tests for all sampling methods.
|
| 153 |
+
6. Adds `BayesianNetwork.load` and `BayesianNetwork.save` to quickly read and write files.
|
| 154 |
+
|
| 155 |
+
### Changed
|
| 156 |
+
1. `BayesianModel` and `MarkovModel` renamed to `BayesianNetwork` and `MarkovNetwork` respectively.
|
| 157 |
+
2. The default value of node position in `DAG.to_daft` method.
|
| 158 |
+
3. Documentation updated on the website.
|
| 159 |
+
|
| 160 |
+
### Fixed
|
| 161 |
+
1. Fixes bug in `DAG.is_iequivalent` method.
|
| 162 |
+
2. Automatically truncate table when CPD is too large.
|
| 163 |
+
3. Auto-adjustment of probability values when they don't exactly sum to 1.
|
| 164 |
+
4. tqdm works both in notebooks and terminal.
|
| 165 |
+
5. Fixes bug in `CausalInference.query` method.
|
| 166 |
+
|
| 167 |
+
## [0.1.15] - 2021-06-30
|
| 168 |
+
### Added
|
| 169 |
+
1. Adds network pruning for inference algrithms to reduce the size of network before
|
| 170 |
+
running inference.
|
| 171 |
+
2. Adds support for latent variables in DAG and BayesianModel.
|
| 172 |
+
3. Parallel implementation for parameter estimation algorithms.
|
| 173 |
+
4. Adds `DAG.get_random` and `BayesianModel.get_random` methods to be able to generate random models.
|
| 174 |
+
5. Adds `CausalInference.query` method for doing do operation inference with or without adjustment sets.
|
| 175 |
+
6. Adds functionality to treesearch to do auto root and class node selection (#1418)
|
| 176 |
+
7. Adds option to specify virtual evidence in bayesian network inference.
|
| 177 |
+
8. Adds Expectation-Maximization (EM) algorithm for parameter estimation in latent variable models.
|
| 178 |
+
9. Add `BDeuScore` as another option for structure score when using HillClimbSearch.
|
| 179 |
+
10. Adds CausalInference.get_minimal_adjustment_set` for finding adjustment sets.
|
| 180 |
+
|
| 181 |
+
### Changed
|
| 182 |
+
1. Renames `DAG.is_active_trail` to `is_dconnected`.
|
| 183 |
+
2. `DAG.do` can accept multiple variables in the argument.
|
| 184 |
+
3. Optimizes sampling methods.
|
| 185 |
+
4. CI moved from travis and appveyor to github actions.
|
| 186 |
+
5. Drops support for python 3.6. Requires 3.7+.
|
| 187 |
+
|
| 188 |
+
### Fixed
|
| 189 |
+
1. Example model files were not getting included in the pypi and conda packages.
|
| 190 |
+
2. The order of values returned by CI tests was wrong. #1403
|
| 191 |
+
3. Adjusted and normalized MI wasn't working properly in TreeSearch.
|
| 192 |
+
4. #1423: Value error in bayesian estimation.
|
| 193 |
+
5. Fixes bug in `DiscreteFactor.__eq__` to also consider the state names order.
|
| 194 |
+
|
| 195 |
+
## [0.1.14] - 2021-03-31
|
| 196 |
+
### Added
|
| 197 |
+
1. Adds support for python 3.9.
|
| 198 |
+
2. `BayesianModelProbability` class for calculating pmf for BNs.
|
| 199 |
+
3. BayesianModel.predict has a new argument `stochastic` which returns stochastic results instead of MAP.
|
| 200 |
+
4. Adds new method pgmpy.base.DAG.to_daft to easily convert models into publishable plots.
|
| 201 |
+
|
| 202 |
+
### Changed
|
| 203 |
+
1. `pgmpy.utils.get_example_model` now doesn't need internet connection to work. Files moved locally.
|
| 204 |
+
|
| 205 |
+
### Fixed
|
| 206 |
+
1. Latex output of `pgmpy.DAG.get_independencies`.
|
| 207 |
+
2. Bug fix in PC algorithm as it was skipping some combinations.
|
| 208 |
+
3. Error in sampling because of seed not correctly set.
|
| 209 |
+
|
| 210 |
+
## [0.1.13] - 2020-12-30
|
| 211 |
+
### Added
|
| 212 |
+
1. New conditional independence tests for discrete variables
|
| 213 |
+
|
| 214 |
+
### Changed
|
| 215 |
+
1. Adds warning in BayesianEstimator when using dirichlet prior.
|
| 216 |
+
|
| 217 |
+
### Fixed
|
| 218 |
+
1. Bug in `PC.skeleton_to_pdag`.
|
| 219 |
+
2. Bug in `HillClimbSearch` when no legal operations.
|
| 220 |
+
|
| 221 |
+
### Removed
|
| 222 |
+
|
| 223 |
+
## [0.1.12] - 2020-09-30
|
| 224 |
+
### Added
|
| 225 |
+
1. PC estimator with original, stable, and parallel variants.
|
| 226 |
+
2. PDAG class to represent partially directed DAGs.
|
| 227 |
+
3. `pgmpy.utils.get_example_model` function to fetch models from bnlearn repository.
|
| 228 |
+
4. Refactor HillClimbSearch with a new feature to specify fixed edges in the model.
|
| 229 |
+
5. Adds a global `SHOW_PROGRESS` variable.
|
| 230 |
+
6. Adds Chow-Liu structure learning algorithm.
|
| 231 |
+
7. Add `pgmpy.utils.get_example_model` to fetch models from bnlearn's repository.
|
| 232 |
+
8. Adds `get_value` and `set_value` method to `DiscreteFactor` to get/set a single value.
|
| 233 |
+
9. Adds `get_acestral_graph` to `DAG`.
|
| 234 |
+
|
| 235 |
+
### Changed
|
| 236 |
+
1. Refactors ConstraintBasedEstimators into PC with a lot of general improvements.
|
| 237 |
+
2. Improved (faster, new arguments) indepenedence tests with changes in argument.
|
| 238 |
+
3. Refactors `sample_discrete` method. Sampling algorithms much faster.
|
| 239 |
+
4. Refactors `HillClimbSearch` to be faster.
|
| 240 |
+
5. Sampling methods now return dataframe of type categorical.
|
| 241 |
+
|
| 242 |
+
### Fixed
|
| 243 |
+
|
| 244 |
+
### Removed
|
| 245 |
+
1. `Data` class.
|
| 246 |
+
|
| 247 |
+
## [0.1.11] - 2020-06-30
|
| 248 |
+
### Added
|
| 249 |
+
- New example notebook: Alarm.ipynb
|
| 250 |
+
- Support for python 3.8
|
| 251 |
+
- Score Caching support for scoring methods.
|
| 252 |
+
|
| 253 |
+
### Changed
|
| 254 |
+
- Code quality check moved to codacy from landscape
|
| 255 |
+
- Additional parameter `max_ci_vars` for `ConstraintBasedEstimator`.
|
| 256 |
+
- Additional parameter `pseudo_count` for K2 score.
|
| 257 |
+
- Sampling methods return state names instead of number when available.
|
| 258 |
+
- XMLBIFReader and BIFReader not accepts argument for specifying state name type.
|
| 259 |
+
|
| 260 |
+
### Fixed
|
| 261 |
+
- Additional checks for TabularCPD values shape.
|
| 262 |
+
- `DiscreteFactor.reduce` accepts both state names and state numbers for variables.
|
| 263 |
+
- `BeliefPropagation.query` fixed to return normalized CPDs.
|
| 264 |
+
- Bug in flip operation in `HillClimbSearch`.
|
| 265 |
+
- BIFWriter to write the state names to file if available.
|
| 266 |
+
- `BayesianModel.to_markov_model` fixed to work with disconnected graphs.
|
| 267 |
+
- VariableElimination fixed to not ignore identifical factors.
|
| 268 |
+
- Fixes automatic sorting of state names in estimators.
|
| 269 |
+
|
| 270 |
+
### Removed
|
| 271 |
+
- No support for ProbModelXML file format.
|
| 272 |
+
|
| 273 |
+
## [0.1.10] - 2020-01-22
|
| 274 |
+
### Added
|
| 275 |
+
- Documentation updated to include Structural Equation Models(SEM) and Causal Inference.
|
| 276 |
+
- Adds Mmhc estimator.
|
| 277 |
+
|
| 278 |
+
### Changed
|
| 279 |
+
- BdeuScore is renamed to BDeuScore.
|
| 280 |
+
- Refactoring of NaiveBayes
|
| 281 |
+
- Overhaul of CI and setup infrastructure.
|
| 282 |
+
- query methods check for common variabls in variable and evidence argument.
|
| 283 |
+
|
| 284 |
+
### Fixed
|
| 285 |
+
- Example notebooks for Inference.
|
| 286 |
+
- DAG.moralize gives consistent results for disconnected graphs.
|
| 287 |
+
- Fixes problems with XMLBIF and BIF reader and writer classes to be consistent.
|
| 288 |
+
- Better integration of state names throughout the package.
|
| 289 |
+
- Improves remove_factors and add_factors methods of FactorGraph
|
| 290 |
+
- copy method of TabularCPD and DiscreteFactor now makes a copy of state names.
|
| 291 |
+
|
| 292 |
+
### Removed
|
| 293 |
+
- six not a dependency anymore.
|
testbed/pgmpy__pgmpy/CITATION.cff
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
cff-version: 1.2.0
|
| 2 |
+
message: "If you use this software, please cite it as below."
|
| 3 |
+
authors:
|
| 4 |
+
- family-names: "Ankan"
|
| 5 |
+
given-names: "Ankur"
|
| 6 |
+
- family-names: "Textor"
|
| 7 |
+
given-names: "Johannes"
|
| 8 |
+
title: "pgmpy: A Python Toolkit for Bayesian Networks"
|
| 9 |
+
version: 0.1.26
|
| 10 |
+
url: "https://github.com/pgmpy/pgmpy"
|
| 11 |
+
preferred-citation:
|
| 12 |
+
type: article
|
| 13 |
+
authors:
|
| 14 |
+
- family-names: "Ankan"
|
| 15 |
+
given-names: "Ankur"
|
| 16 |
+
- family-names: "Textor"
|
| 17 |
+
given-names: "Johannes"
|
| 18 |
+
journal: "Journal Of Machine Learning Research"
|
| 19 |
+
start: 1
|
| 20 |
+
end: 8
|
| 21 |
+
title: "pgmpy: A Python Toolkit for Bayesian Networks"
|
| 22 |
+
issue: 25
|
| 23 |
+
year: 2024
|
testbed/pgmpy__pgmpy/CODE_OF_CONDUCT.md
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Code of Conduct
|
| 2 |
+
|
| 3 |
+
## Our Pledge
|
| 4 |
+
|
| 5 |
+
In the interest of fostering an open and welcoming environment, we as
|
| 6 |
+
contributors and maintainers pledge to make participation in our project and
|
| 7 |
+
our community a harassment-free experience for everyone, regardless of age, body
|
| 8 |
+
size, disability, ethnicity, sex characteristics, gender identity and expression,
|
| 9 |
+
level of experience, education, socio-economic status, nationality, personal
|
| 10 |
+
appearance, race, religion, or sexual identity and orientation.
|
| 11 |
+
|
| 12 |
+
## Our Standards
|
| 13 |
+
|
| 14 |
+
Examples of behavior that contributes to creating a positive environment
|
| 15 |
+
include:
|
| 16 |
+
|
| 17 |
+
* Using welcoming and inclusive language
|
| 18 |
+
* Being respectful of differing viewpoints and experiences
|
| 19 |
+
* Gracefully accepting constructive criticism
|
| 20 |
+
* Focusing on what is best for the community
|
| 21 |
+
* Showing empathy towards other community members
|
| 22 |
+
|
| 23 |
+
Examples of unacceptable behavior by participants include:
|
| 24 |
+
|
| 25 |
+
* The use of sexualized language or imagery and unwelcome sexual attention or
|
| 26 |
+
advances
|
| 27 |
+
* Trolling, insulting/derogatory comments, and personal or political attacks
|
| 28 |
+
* Public or private harassment
|
| 29 |
+
* Publishing others' private information, such as a physical or electronic
|
| 30 |
+
address, without explicit permission
|
| 31 |
+
* Other conduct which could reasonably be considered inappropriate in a
|
| 32 |
+
professional setting
|
| 33 |
+
|
| 34 |
+
## Our Responsibilities
|
| 35 |
+
|
| 36 |
+
Project maintainers are responsible for clarifying the standards of acceptable
|
| 37 |
+
behavior and are expected to take appropriate and fair corrective action in
|
| 38 |
+
response to any instances of unacceptable behavior.
|
| 39 |
+
|
| 40 |
+
Project maintainers have the right and responsibility to remove, edit, or
|
| 41 |
+
reject comments, commits, code, wiki edits, issues, and other contributions
|
| 42 |
+
that are not aligned to this Code of Conduct, or to ban temporarily or
|
| 43 |
+
permanently any contributor for other behaviors that they deem inappropriate,
|
| 44 |
+
threatening, offensive, or harmful.
|
| 45 |
+
|
| 46 |
+
## Scope
|
| 47 |
+
|
| 48 |
+
This Code of Conduct applies within all project spaces, and it also applies when
|
| 49 |
+
an individual is representing the project or its community in public spaces.
|
| 50 |
+
Examples of representing a project or community include using an official
|
| 51 |
+
project e-mail address, posting via an official social media account, or acting
|
| 52 |
+
as an appointed representative at an online or offline event. Representation of
|
| 53 |
+
a project may be further defined and clarified by project maintainers.
|
| 54 |
+
|
| 55 |
+
## Enforcement
|
| 56 |
+
|
| 57 |
+
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
| 58 |
+
reported by contacting the project maintainer at ankurankan@gmail.com. All
|
| 59 |
+
complaints will be reviewed and investigated and will result in a response that
|
| 60 |
+
is deemed necessary and appropriate to the circumstances. The project team is
|
| 61 |
+
obligated to maintain confidentiality with regard to the reporter of an incident.
|
| 62 |
+
Further details of specific enforcement policies may be posted separately.
|
| 63 |
+
|
| 64 |
+
Project maintainers who do not follow or enforce the Code of Conduct in good
|
| 65 |
+
faith may face temporary or permanent repercussions as determined by other
|
| 66 |
+
members of the project's leadership.
|
| 67 |
+
|
| 68 |
+
## Attribution
|
| 69 |
+
|
| 70 |
+
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
| 71 |
+
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
| 72 |
+
|
| 73 |
+
[homepage]: https://www.contributor-covenant.org
|
| 74 |
+
|
| 75 |
+
For answers to common questions about this code of conduct, see
|
| 76 |
+
https://www.contributor-covenant.org/faq
|
testbed/pgmpy__pgmpy/Contributing.md
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Contributing to pgmpy
|
| 2 |
+
|
| 3 |
+
Hi! Thanks for your interest in contributing to [pgmpy](https://pgmpy.org). This
|
| 4 |
+
document summarizes everything that you need to know to get started.
|
| 5 |
+
|
| 6 |
+
## Code and Issues
|
| 7 |
+
|
| 8 |
+
We use [Github](https://github.com/pgmpy/pgmpy) to host all our code. We also use github
|
| 9 |
+
as our [issue tracker](https://github.com/pgmpy/pgmpy/issues). Please feel free to
|
| 10 |
+
create a new issue for any bugs, questions etc. It is very helpful if you follow the
|
| 11 |
+
issue template while creating new issues as it gives us enough information to reproduce
|
| 12 |
+
the problem. You can also refer to github's
|
| 13 |
+
[guide](https://guides.github.com/features/issues/) on how to efficiently use github
|
| 14 |
+
issues.
|
| 15 |
+
|
| 16 |
+
### Git and our Branching model
|
| 17 |
+
|
| 18 |
+
#### Git
|
| 19 |
+
|
| 20 |
+
We use [Git](http://git-scm.com/) as our [version control
|
| 21 |
+
system](http://en.wikipedia.org/wiki/Revision_control), so the best way to contribute is
|
| 22 |
+
to learn how to use it and put your changes on a Git repository. There are plenty of
|
| 23 |
+
online resources available to get started with Git:
|
| 24 |
+
- Online tool to try git: [try git
|
| 25 |
+
tutorial](https://try.github.io/levels/1/challenges/1)
|
| 26 |
+
- Quick intro to opening your first Pull Request:
|
| 27 |
+
https://www.freecodecamp.org/news/how-to-make-your-first-pull-request-on-github-3/
|
| 28 |
+
- Git reference: [Pro Git book](http://git-scm.com/book/).
|
| 29 |
+
|
| 30 |
+
#### Forks + GitHub Pull Requests
|
| 31 |
+
|
| 32 |
+
We use [gitflow](http://nvie.com/posts/a-successful-git-branching-model/) to manage our
|
| 33 |
+
branches.
|
| 34 |
+
|
| 35 |
+
Summary of our git branching model:
|
| 36 |
+
- Fork the desired repository on GitHub to your account.
|
| 37 |
+
- Clone your forked repository locally: `git clone git@github.com:your-username/repository-name.git`.
|
| 38 |
+
- Create a new branch off of `dev` branch with a descriptive name (for example:
|
| 39 |
+
`feature/portuguese-sentiment-analysis`, `hotfix/bug-on-downloader`). You can
|
| 40 |
+
do it by switching to `dev` branch: `git checkout dev` and then
|
| 41 |
+
creating a new branch: `git checkout -b name-of-the-new-branch`.
|
| 42 |
+
- Make changes to the codebase and commit it. <b> [Imp] </b> Make sure that tests pass for each of your commits.
|
| 43 |
+
- Rebase your branch on the current dev and push to your fork on GitHub (with the same name as your local branch):
|
| 44 |
+
`git push origin branch-name`
|
| 45 |
+
- Create a pull request using GitHub's Web interface (asking us to pull the
|
| 46 |
+
changes from your new branch and add the changes to our `dev` branch).
|
| 47 |
+
- Wait for reviews and comments.
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
#### Tips
|
| 51 |
+
|
| 52 |
+
- <b> [Imp] </b> Write [helpful commit
|
| 53 |
+
messages](http://robots.thoughtbot.com/5-useful-tips-for-a-better-commit-message).
|
| 54 |
+
- Anything in the `dev` branch should be deployable (no failing tests).
|
| 55 |
+
- Never use `git add .`: it can add unwanted files;
|
| 56 |
+
- Avoid using `git commit -a` unless you know what you're doing;
|
| 57 |
+
- Check every change with `git diff` before adding them to the index (stage
|
| 58 |
+
area) and with `git diff --cached` before committing;
|
| 59 |
+
- If you have push access to the main repository, please do not commit directly
|
| 60 |
+
to `dev`: your access should be used only to accept pull requests; if you
|
| 61 |
+
want to make a new feature, you should use the same process as other
|
| 62 |
+
developers so that your code can be reviewed.
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
### Code Guidelines
|
| 66 |
+
|
| 67 |
+
- We use `black`(https://black.readthedocs.io/en/stable/) for our code formatting.
|
| 68 |
+
- Write tests for your new features (please see "Tests" topic below);
|
| 69 |
+
- Always remember that [commented code is dead
|
| 70 |
+
code](http://www.codinghorror.com/blog/2008/07/coding-without-comments.html);
|
| 71 |
+
- Name identifiers (variables, classes, functions, module names) with readable
|
| 72 |
+
names (`x` is always wrong);
|
| 73 |
+
- When manipulating strings, use [Python's f-Strings](https://realpython.com/python-f-strings/)
|
| 74 |
+
(`f'{a} = {b}'` instead of `'{} = {}'.format(a, b)`);
|
| 75 |
+
- When working with files use `with open(<filename>, <option>) as f` instead of
|
| 76 |
+
` f = open(<filename>, <option>)`;
|
| 77 |
+
- All `#TODO` comments should be turned into issues (use our
|
| 78 |
+
[GitHub issue system](https://github.com/pgmpy/pgmpy/issues));
|
| 79 |
+
- Run all tests before pushing (just execute `pytest`) so you will know if your
|
| 80 |
+
changes broke something;
|
| 81 |
+
|
| 82 |
+
### Tests
|
| 83 |
+
|
| 84 |
+
We use [Travis CI](https://travis-ci.org/) for continuous integration for linux systems
|
| 85 |
+
and [AppVeyor](https://www.appveyor.com/) for Windows systems. We use python [unittest
|
| 86 |
+
module](https://docs.python.org/2/library/unittest.html) for writing tests. You should
|
| 87 |
+
write tests for every feature you add or bug you solve in the code. Having automated
|
| 88 |
+
tests for every line of our code let us make big changes without worries: there will
|
| 89 |
+
always be tests to verify if the changes introduced bugs or lack of features. If we
|
| 90 |
+
don't have tests we will be blind and every change will come with some fear of possibly
|
| 91 |
+
breaking something.
|
| 92 |
+
|
| 93 |
+
For a better design of your code, we recommend using a technique called [test-driven
|
| 94 |
+
development](https://en.wikipedia.org/wiki/Test-driven_development), where you write
|
| 95 |
+
your tests **before** writing the actual code that implements the desired feature.
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
## Discussion
|
| 99 |
+
|
| 100 |
+
Please feel free to contact us through the mailing list if you have any questions or
|
| 101 |
+
suggestions. Connect with us at [gitter](https://gitter.im/pgmpy/pgmpy). All
|
| 102 |
+
contributions are very welcome!
|
| 103 |
+
|
| 104 |
+
*Mailing list* : pgmpy@googlegroups.com
|
| 105 |
+
|
| 106 |
+
Happy hacking! ;)
|
testbed/pgmpy__pgmpy/LICENSE
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
The MIT License (MIT)
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2013-2024 pgmpy
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
| 6 |
+
this software and associated documentation files (the "Software"), to deal in
|
| 7 |
+
the Software without restriction, including without limitation the rights to
|
| 8 |
+
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
| 9 |
+
the Software, and to permit persons to whom the Software is furnished to do so,
|
| 10 |
+
subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
| 17 |
+
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
| 18 |
+
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
| 19 |
+
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
| 20 |
+
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
testbed/pgmpy__pgmpy/MANIFEST.in
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
include pgmpy/utils/example_models/*.bif.gz
|
testbed/pgmpy__pgmpy/README.md
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<div align="center">
|
| 2 |
+
<img src="https://raw.githubusercontent.com/pgmpy/pgmpy/dev/logo/logo_color.png" width="318" height="300"/>
|
| 3 |
+
</div>
|
| 4 |
+
<br/>
|
| 5 |
+
<div align="center">
|
| 6 |
+
|
| 7 |
+

|
| 8 |
+
[](https://pypistats.org/packages/pgmpy)
|
| 9 |
+
[](https://pypi.org/project/pgmpy/)
|
| 10 |
+
[](https://pypi.org/project/pgmpy/)
|
| 11 |
+
[](https://github.com/pgmpy/pgmpy/blob/dev/LICENSE)
|
| 12 |
+
[](http://pgmpy.org/pgmpy-benchmarks/)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
</div>
|
| 16 |
+
|
| 17 |
+
<div align="center">
|
| 18 |
+
|
| 19 |
+
[](https://discord.gg/DRkdKaumBs)
|
| 20 |
+
[](https://pgmpy.org)
|
| 21 |
+
[](https://github.com/pgmpy/pgmpy/tree/dev/examples)
|
| 22 |
+
[](https://github.com/pgmpy/pgmpy_notebook)
|
| 23 |
+
|
| 24 |
+
</div>
|
| 25 |
+
|
| 26 |
+
pgmpy is a Python package for working with Bayesian Networks and related models such as Directed Acyclic Graphs, Dynamic Bayesian Networks, and Structural Equation Models. It combines features from causal inference and probabilistic inference literature to allow users to seamlessly work between them. It implements algorithms for structure learning, causal discovery, parameter estimation, probabilistic and causal inference, and simulations.
|
| 27 |
+
|
| 28 |
+
- **Documentation:** https://pgmpy.org/
|
| 29 |
+
- **Installation:** https://pgmpy.org/started/install.html
|
| 30 |
+
- **Mailing List:** https://groups.google.com/forum/#!forum/pgmpy .
|
| 31 |
+
- **Community chat:** [discord](https://discord.gg/DRkdKaumBs) (Older chat at: [gitter](https://gitter.im/pgmpy/pgmpy))
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
Examples
|
| 35 |
+
--------
|
| 36 |
+
- Creating a Bayesian Network: [view](https://pgmpy.org/examples/Creating%20a%20Discrete%20Bayesian%20Network.html) | <a target="_blank" href="https://colab.research.google.com/github/ankurankan/pgmpy/blob/dev/examples/Creating%20a%20Discrete%20Bayesian%20Network.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a>
|
| 37 |
+
- Structure Learning/Causal Discovery: [view](https://pgmpy.org/examples/Structure%20Learning%20in%20Bayesian%20Networks.html) | <a target="_blank" href="https://colab.research.google.com/github/ankurankan/pgmpy/blob/dev/examples/Structure%20Learning%20in%20Bayesian%20Networks.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a>
|
| 38 |
+
- Parameter Learning: [view](https://pgmpy.org/examples/Learning%20Parameters%20in%20Discrete%20Bayesian%20Networks.html) | <a target="_blank" href="https://colab.research.google.com/github/ankurankan/pgmpy/blob/dev/examples/Learning%20Parameters%20in%20Discrete%20Bayesian%20Networks.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a>
|
| 39 |
+
- Probabilistic Inference: [view](https://pgmpy.org/examples/Inference%20in%20Discrete%20Bayesian%20Networks.html) | <a target="_blank" href="https://colab.research.google.com/github/ankurankan/pgmpy/blob/dev/examples/Inference%20in%20Discrete%20Bayesian%20Networks.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a>
|
| 40 |
+
- Causal Inference: [view](https://pgmpy.org/examples/Causal%20Inference.html) | <a target="_blank" href="https://colab.research.google.com/github/ankurankan/pgmpy/blob/dev/examples/Causal%20Inference.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a>
|
| 41 |
+
- Extending pgmpy: [view](https://pgmpy.org/examples/Extending%20pgmpy.html) | <a target="_blank" href="https://colab.research.google.com/github/ankurankan/pgmpy/blob/dev/examples/Extending%20pgmpy.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a>
|
| 42 |
+
|
| 43 |
+
<br/>
|
| 44 |
+
|
| 45 |
+
- Full List of Examples: https://github.com/pgmpy/pgmpy/tree/dev/examples
|
| 46 |
+
- Tutorials: https://github.com/pgmpy/pgmpy_notebook/
|
| 47 |
+
|
| 48 |
+
Citing
|
| 49 |
+
======
|
| 50 |
+
If you use `pgmpy` in your scientific work, please consider citing us:
|
| 51 |
+
|
| 52 |
+
```
|
| 53 |
+
Ankur Ankan, & Johannes Textor (2024). pgmpy: A Python Toolkit for Bayesian Networks. Journal of Machine Learning Research, 25(265), 1–8.
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Bibtex:
|
| 57 |
+
```
|
| 58 |
+
@article{Ankan2024,
|
| 59 |
+
author = {Ankur Ankan and Johannes Textor},
|
| 60 |
+
title = {pgmpy: A Python Toolkit for Bayesian Networks},
|
| 61 |
+
journal = {Journal of Machine Learning Research},
|
| 62 |
+
year = {2024},
|
| 63 |
+
volume = {25},
|
| 64 |
+
number = {265},
|
| 65 |
+
pages = {1--8},
|
| 66 |
+
url = {http://jmlr.org/papers/v25/23-0487.html}
|
| 67 |
+
}
|
| 68 |
+
```
|
| 69 |
+
|
| 70 |
+
Development
|
| 71 |
+
============
|
| 72 |
+
|
| 73 |
+
Code
|
| 74 |
+
----
|
| 75 |
+
The latest codebase is available in the `dev` branch of the repository.
|
| 76 |
+
|
| 77 |
+
Building from Source
|
| 78 |
+
--------------------
|
| 79 |
+
To install pgmpy from the source code:
|
| 80 |
+
```
|
| 81 |
+
$ git clone https://github.com/pgmpy/pgmpy
|
| 82 |
+
$ cd pgmpy/
|
| 83 |
+
$ pip install -r requirements.txt
|
| 84 |
+
$ python setup.py install
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
To run the tests, you can use pytest:
|
| 88 |
+
```
|
| 89 |
+
$ pytest -v pgmpy
|
| 90 |
+
```
|
| 91 |
+
|
| 92 |
+
If you face any problems during installation let us know, via issues, mail or at our discord channel.
|
| 93 |
+
|
| 94 |
+
Contributing
|
| 95 |
+
------------
|
| 96 |
+
Please feel free to report any issues on GitHub: https://github.com/pgmpy/pgmpy/issues.
|
| 97 |
+
|
| 98 |
+
Before opening a pull request, please have a look at our [contributing guide](
|
| 99 |
+
https://github.com/pgmpy/pgmpy/blob/dev/Contributing.md). If you face any
|
| 100 |
+
problems in pull request, feel free to ask them on the mailing list or gitter.
|
| 101 |
+
|
| 102 |
+
If you would like to implement any new features, please have a discussion about it before starting to work on it.
|
| 103 |
+
If you are looking for some ideas for projects, we have a list of **mentored projects** available at: https://github.com/pgmpy/pgmpy/wiki/Mentored-Projects.
|
| 104 |
+
|
| 105 |
+
Building Documentation
|
| 106 |
+
----------------------
|
| 107 |
+
We use sphinx to build the documentation. Please refer: https://github.com/pgmpy/pgmpy/wiki/Maintenance-Guide#building-docs for steps to build docs locally.
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
License
|
| 112 |
+
=======
|
| 113 |
+
pgmpy is released under MIT License. You can read about our license at [here](https://github.com/pgmpy/pgmpy/blob/dev/LICENSE)
|
testbed/pgmpy__pgmpy/examples/Creating a Linear Gaussian Bayesian Network.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
testbed/pgmpy__pgmpy/examples/Linear Gaussian Bayesian Network.ipynb
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": 1,
|
| 6 |
+
"metadata": {},
|
| 7 |
+
"outputs": [
|
| 8 |
+
{
|
| 9 |
+
"data": {
|
| 10 |
+
"text/plain": [
|
| 11 |
+
"array([ 3, 8, 10])"
|
| 12 |
+
]
|
| 13 |
+
},
|
| 14 |
+
"execution_count": 1,
|
| 15 |
+
"metadata": {},
|
| 16 |
+
"output_type": "execute_result"
|
| 17 |
+
}
|
| 18 |
+
],
|
| 19 |
+
"source": [
|
| 20 |
+
"from pgmpy.factors.continuous import LinearGaussianCPD\n",
|
| 21 |
+
"import numpy as np\n",
|
| 22 |
+
"\n",
|
| 23 |
+
"mu = np.array([2, 3, 8, 10])\n",
|
| 24 |
+
"sigma = np.array([[2.3, 0, 0, 0], [0, 1.5, 0, 0], [0, 0, 1.7, 0], [0, 0, 0, 2]])\n",
|
| 25 |
+
"\n",
|
| 26 |
+
"cpd = LinearGaussianCPD(\"Y\", mu, sigma, [\"U1\", \"U2\", \"U3\"])\n",
|
| 27 |
+
"cpd.variable\n",
|
| 28 |
+
"cpd.evidence\n",
|
| 29 |
+
"cpd.beta_vector"
|
| 30 |
+
]
|
| 31 |
+
}
|
| 32 |
+
],
|
| 33 |
+
"metadata": {
|
| 34 |
+
"kernelspec": {
|
| 35 |
+
"display_name": "Python 3",
|
| 36 |
+
"language": "python",
|
| 37 |
+
"name": "python3"
|
| 38 |
+
},
|
| 39 |
+
"language_info": {
|
| 40 |
+
"codemirror_mode": {
|
| 41 |
+
"name": "ipython",
|
| 42 |
+
"version": 3
|
| 43 |
+
},
|
| 44 |
+
"file_extension": ".py",
|
| 45 |
+
"mimetype": "text/x-python",
|
| 46 |
+
"name": "python",
|
| 47 |
+
"nbconvert_exporter": "python",
|
| 48 |
+
"pygments_lexer": "ipython3",
|
| 49 |
+
"version": "3.6.0"
|
| 50 |
+
}
|
| 51 |
+
},
|
| 52 |
+
"nbformat": 4,
|
| 53 |
+
"nbformat_minor": 2
|
| 54 |
+
}
|
testbed/pgmpy__pgmpy/examples/Monty Hall Problem.ipynb
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"metadata": {},
|
| 6 |
+
"source": [
|
| 7 |
+
"# Monty Hall Problem"
|
| 8 |
+
]
|
| 9 |
+
},
|
| 10 |
+
{
|
| 11 |
+
"cell_type": "markdown",
|
| 12 |
+
"metadata": {},
|
| 13 |
+
"source": [
|
| 14 |
+
"### Problem Description:\n",
|
| 15 |
+
"The Monty Hall Problem is a very famous problem in Probability Theory. The question goes like:\n",
|
| 16 |
+
"\n",
|
| 17 |
+
"\n",
|
| 18 |
+
"Suppose you're on a game show, and you're given the choice of three doors: Behind one door is a car; behind the others, goats. You pick a door, say No. 1, and the host, who knows what's behind the doors, opens another door, say No. 3, which has a goat. He then says to you, \"Do you want to pick door No. 2?\" Is it to your advantage to switch your choice?\n",
|
| 19 |
+
"\n",
|
| 20 |
+
"By intuition it seems that there shouldn't be any benefit of switching the door. But using Bayes' Theorem we can show that by switching the door the contestant has more chances of winning.\n",
|
| 21 |
+
"\n",
|
| 22 |
+
"You can also checkout the wikipedia page: https://en.wikipedia.org/wiki/Monty_Hall_problem"
|
| 23 |
+
]
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"cell_type": "markdown",
|
| 27 |
+
"metadata": {},
|
| 28 |
+
"source": [
|
| 29 |
+
"### Probabilistic Interpretation:\n",
|
| 30 |
+
"So we have 3 random variables Contestant $C \\in \\{1, 2, 3\\}$, Host $H \\in \\{1, 2, 3\\}$ and prize $P \\in \\{1, 2, 3 \\}$. The prize has been put randomly behind the doors therefore: $P(P=1) = P(P=2) = P(P=3) = \\frac{1}{3}$. Also, the contestant is going to choose the door randomly, therefore: $P(C=1) = P(C=2) = P(C=3) = \\frac{1}{3}$. For this problem we can build a Bayesian Network structure like:\n",
|
| 31 |
+
"\n"
|
| 32 |
+
]
|
| 33 |
+
},
|
| 34 |
+
{
|
| 35 |
+
"cell_type": "code",
|
| 36 |
+
"execution_count": 4,
|
| 37 |
+
"metadata": {},
|
| 38 |
+
"outputs": [
|
| 39 |
+
{
|
| 40 |
+
"data": {
|
| 41 |
+
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAdcAAAHXCAYAAADuhBDBAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAXEQAAFxEByibzPwAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3X1UVPedx/HPIAio+Byzq2JywKjn4POwjWZ9SI6gJq2ePOBG15ytTX2IaOz2SZNsd3tiztq1x+1JbaqCxCWbRm2jtkm2RlE3krixSRmNDzSRLFSNpj5gAgIBBObuH+6MgqgM/O7ceXi/zvGUMsO939ty53M/lzt3XJZlWQIAAMbEOD0AAACRhnAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMAwwhUAAMMIVwAADCNcAQAwjHAFAMCwWKcHQPs1NDTo+PHj8ng8Onz4sMrLy1VXV6e6ujrFxsYqISFBXbp00ZAhQ+R2u+V2u3XnnXc6PTYQlc6fPy+PxyOPx6OSkhJ99dVXqqurU2NjoxISEpSQkKC+fftqzJgxcrvdGj58uOLi4pweG+1EuIaRxsZG/f73v9euXbvk8Xh09OhR1dfXB7SMAQMGyO12695779UTTzyhQYMG2TQtEN1Onz6tX/3qV/rggw/k8Xh09uzZgH4+Pj5eI0eOlNvt1vTp0/X1r39dsbG8ZIcLl2VZltND4NbOnz+vvLw85eTk6LPPPmv2WI8ePfytdODAgf4j4KamJtXW1qqqqkrHjh2Tx+PRiRMndP3/3TExMZoxY4ays7OVkZGhmBj+SgB0hNfr1d69e7Vu3Tq99dZb8nq9/sdcLpeGDh0qt9utESNGKCkpSYmJierUqZP/jNOZM2f87baysrLZspOTk7Vo0SLNnz+fM1BhgHANYQcPHtTatWu1fft2NTQ0SJL69u2ruXPnavz48UpPT1dKSopcLleblldVVaWPPvpIHo9Hb775pt555x3/Y4MHD9bixYs1f/58de/e3ZbtASLV5cuXlZeXp/Xr1+t///d//d9/4IEHNHPmTLndbo0ePVpJSUltWp5lWSorK1NRUZEOHjyoX/3qV7p06ZIkKS4uTo899piWLVum8ePH27I9MMBCyKmsrLTmz59vSfL/Gz9+vPXqq69atbW1xtbzpz/9yVq2bJnVvXt3/3qSk5Ot3bt3G1sHEOl2795tJScn+/eh7t27W8uWLbP+9Kc/GVtHbW2t9eqrr1rjx49v9rqwYMECq7Ky0th6YA7hGmJa7qjz5s2zDh06ZOs6q6urrdzcXCslJYWdFmijlgfBKSkpVm5urlVdXW3reg8dOmTNmzePA+IQR7iGiJY7ampqqlVYWBjUGaqrq61ly5ax0wK30fIgeNmyZbaHakuFhYUcEIcwwjUElJWVWampqY7uqNdrudOuXLnS8nq9js
0DhAqv12s9//zzjh4EX6/lAXFqaqpVVlbm2Dy4hnB1WHFxsdW/f39LknXXXXc5uqNer7q62lq6dKl/p/3e975HwCKqeb1e67vf/a5/n1i6dKmjB8HXKywstO666y5LktW/f3+ruLjY6ZGiHuHqoOLiYqtPnz6WJGv48OHW559/7vRIN/j5z3/ufzHJzs4mYBGVvF6vlZ2d7d8X1q5d6/RIN/j888+ttLQ0S5LVt29fAtZhhKtDysrK/I01PT3dunTpktMj3dR//Md/WC6Xy5JkPffcc06PAwTds88+a0myXC6XlZ+f7/Q4N1VeXm6lp6f7GyyniJ1DuDqgsrLS/zfW4cOHh3Sw+mzYsMF/1L5u3TqnxwGC5pe//KX/dz8nJ8fpcW6rvLzc32BTU1O5yMkhhKsDFixY4P8bayieCr6ZVatWWZKsxMREq6SkxOlxANuVlJRYiYmJliRr1apVTo/TZmfPnvX/DXbBggVOjxOVuENTkBUUFGjatGmSpMLCQk2aNMnhidrOsixlZmZq3759mjBhggoLC7llIiKW1+vV5MmTdeDAAWVkZKigoKDNd0MLBYWFhbr//vslSbt379bUqVOdHSjK8MoYRJWVlZo/f74kadmyZWEVrNLVe6O+/PLL6tatmw4cOKBf/OIXTo8E2Gbt2rU6cOCAkpKSlJeXF1bBKkmTJ0/W008/LUmaP3++Ll++7PBE0YXmGkQLFy7Uxo0blZqaqiNHjqhr165Oj9QuOTk5euqpp5SYmKgjR47onnvucXokwKhPP/1Uo0aNUm1trXJycrRw4UKnR2qXmpoajRw5UmVlZVqwYIFyc3OdHilqEK5BcvDgQd13332Swu90cEvXnx6eNm2adu3a5fRIgFHTp0/X7t27w/J0cEvXnx5+//33udl/kHBaOEjWrl0rSZo3b15YB6t09fRwbm6uYmJitHv3bn3yySdOjwQY8/HHH2v37t2KiYlRTk5OWAerdPX08Lx58ySJP+UEEeEaBOfOndP27dslSd/5znccnsaMlJQUfeMb35AkbdiwweFpAHN8v88zZsxQSkqKw9OY4fvb67Zt23T+/HmHp4kOhGsQvPzyy2poaND48eM1evRop8cxJjs7W5KUn5+vmpoah6cBOq6mpkb5+fmSrv1+R4KxY8dq3Lhxamho0Msvv+z0OFGBcLVZY2OjcnJyJJnbWTMzM9WrVy+5XC7/P7fbrczMTGVmZsrtdsvtdmvWrFk6dOiQkXXebI7U1FRVVlZqy5Yttq0HCJbNmzfr8uXLGjx4sDIyMmxZx969e7Vo0SKlpqY222/37t3rf85Pf/pT4/uu7/Vnw4YNampqMrpstMK5t9hGh9/97nf+e32a/KBzy7KsrKysW941Jicnx5JkLVy40Oh6r7dmzRpLkjVmzBjb1gEEy+jRoy1J1r//+78bX3Zpaak1duxYKyUlxXr99ddveHz16tVWVlaWVVpaakmyPB6P0fXX1tb672X+xhtvGF02bkRztZnvStq5c+cqISHB6LJ79+7d7D9bWrhwoZYvX67c3Fz99Kc/Nbpun3nz5snlcunw4cO6cOGCLesAguH8+fP66KOP5HK59M1vftPosg8dOiS32y1JKi0tVVZW1g3PWb58uZ599lmlpqYaXbdPQkKC5s6dK0lc4R8EhKvNPB6PJDl2+fvjjz8uSfrJT35iy/L79OmjIUOGSLq2rUA48v3+Dh06VH369DG23IqKCk2ZMkWStG/fvls+d+zYsVq9erWxdbfkex1iX7Uf4WqjK1eu6OjRo5Kk9PR0R2bo2bOnpKs7eFlZmS3r8G1bUVGRLcsHgsEXOKb31RUrVqiiokKrV6/274+3snz5cqPrv55v244cOaKGhgbb1gPC1VbFxcWqr69Xz549Hbukv6Kiwv+1XTP4TndxNIxw5js49P0+m1BRUeG/K9Lf/d3ftfnn7LqYKjU1VT169FB9fb2Ki4ttWQeuIlxt5AubsWPHOvZGdN8ViH
YeDROuiAS+31+T4fqb3/xG0tUD27a0Vp8VK1YE9Py2crlcGjt2rCT2V7sRrjY6fPiwJLM7ayD27t2rFStWKCsry9a/44wZM0Yul0tnzpzRxYsXbVsPYJeLFy/q7NmzcrlcRt+LXlpaKinws0YZGRm2n2my8216kGKdHiCSlZeXS5IGDhxo63pycnK0Z88e/3//4osv/F/v2bPHtlNMPklJSerRo4cqKip06dIl3XHHHbauDzDNt6/27NlTSUlJxpbru87BjhbaXr7Xo0uXLjk8SWQjXG1UV1cnScbfgtPSrFmzHP/UDt82+rYZCCd27au+9nn9tQ9OY18NDk4L2yhY4RoKEhMTJbHDIjzZta/63rMa6JX6doYx+2pwEK42io29emIgGm411tjYKOnaNgPhxK591XeFcFlZWUCB+Zvf/Ma2t86xrwYH4Woj31FwbW2tw5PYz7eN0dDSEXns2ld79uzp/5ON78rhtigtLbXtgib21eAgXG3UpUsXSVJVVZXDk9jL6/Wqurpa0rVTTkA48e2r1dXVsizL6LJ9N49YsWJFm55fVlZm9A5RLflej9hX7UW42sh3W8Bjx445PIm9SktLVVdXp/j4eA0aNMjpcYCAJScnKz4+XrW1tf63z5jSs2dP/20P2/K2vBUrVtj6vnTf65Hv9Qn2IFxtZPfNFXxvuXH6SkTf9o0aNUpxcXGOzgK0R+fOnTVy5EhJ9uyvY8eO9S+3V69e/rs2XW/v3r2aNWuWre9Jl+y7zSOaI1xt5AvXEydOGD017Psc1W3btkm6eqSbmpqqRYsWGVtHIOy4sw0QbHYfDKekpMjj8ej111/Xnj17bvg817KyMr3++uu23iq1qqpKJSUlkthf7cblYja68847NWDAAJ09e1YfffSRJk6caGS5198wIhQQrogEwbqNZ0ZGhu03drmZw4cPy7IsDRw4UP369XNkhmhBc7VZpN9317Is/23UCFeEs+tvC2j6oqZQwYFw8BCuNrv33nslSW+++abDk9jjvffeU2Vlpbp27aq0tDSnxwHaLS0tTV26dFFFRYXee+89p8exxVtvvSXp2usS7EO42uyJJ55QTEyM3nnnHX388cdOj2PcunXrJElz587lYiaEtc6dO2vu3LmSpPXr1zs8jXkff/yx3nnnHcXExOiJJ55wepyIR7jabNCgQZoxY4YkacOGDQ5PY9a5c+e0fft2SdLixYsdngbouOzsbEnS9u3bde7cOYenMct3wDBz5kwlJyc7PE3kI1yDwLfD5ufnq6amxuFpzMnLy1NjY6Puu+8+ox/TBThl9OjRGj9+vBoaGvTyyy87PY4x1dXVeuWVVyRdez2CvQjXIMjIyNDgwYN1+fJlbd682elxjGhsbFROTo4kdlZEFt/vc05Ojv8+vOFu8+bNunz5su655x5NmTLF6XGiAuEaBDExMf7Tpv/2b/8WEe01Ly9PZ86cUd++fZWVleX0OIAxWVlZ6tu3rz777LOIaK81NTX+G1MsXrxYMTG87AcD/ysHyfz585WcnKyysjI999xzTo/TISdPntQPf/hDSdKPfvQjxcfHOzwRYE5CQoJ+9KMfSZJ+8IMf6NSpUw5P1DHPPvusysrKlJycrG9/+9tOjxM1XFakvqErBBUUFGjatGmSpMLCQk2aNMnhiQJnWZYyMzO1b98+TZw4Ufv37+dIGBHH6/Vq8uTJOnDggDIyMlRQUCCXy+X0WAErLCzU/fffL+nq609mZqazA0URXhWDaOrUqVqwYIEk6cknnwzL08O5ubnat2+fEhMTtWnTJoIVESkmJkabNm1SYmKi9u7dq40bNzo9UsBqamr05JNPSpIWLFhAsAYZr4xBtmbNGiUnJ6u0tFTPPPOM0+ME5M9//rN+8IMfSJJ+8pOfaPDgwQ5PBNjnnnvu0apVqyRJ3//+9/XnP//Z4YkC88wzz6isrEyDBg3SmjVrnB4n6hCuQda9e3fl5eVJkl566SWtXbvW4Yna5s
KFC5o+fbqqq6s1ceJEPf30006PBNhu2bJlmjBhgqqrq/Xggw/qwoULTo/UJj//+c/10ksvSbp68WH37t0dnij6EK4OmDp1qlauXClJ+s53vqP8/HxnB7qNiooKTZs2TSUlJRo0aJBee+01TgcjKsTExGjz5s0aNGiQTpw4oWnTpjn+EY+3k5+fr3/8x3+UJK1cuZLTwU6x4Aiv12t997vftSRZLpfLysnJcXqkVp0/f94aPXq0Jcm68847rZKSEqdHAoKupKTE6tevnyXJGjNmjHX+/HmnR2rVhg0bLJfLZUmyvve971ler9fpkaIW4eogr9drZWdnW5IsSdaqVatCamcoKyuzhgwZ4g/Wo0ePOj0S4JijR4/6A3bIkCFWWVmZ0yP5eb1ea9WqVf7XkiVLloTUa0k0Ilwd5vV6reeee86/U2RkZFgnT550fKYNGzZY3bp1syRZgwYNorEC1tUGO2jQIEuS1a1bN2vDhg2Oh9jJkyetKVOm+F9D/umf/snxmUC4hox169ZZiYmJju+0LXfUiRMnWqdPnw76HECoOn36tDVhwgTHD4hbHgQnJiZa69atC/ocaB3hGkJKSkpu2GlLS0uDsu6GhgZr/fr1zXbUF1980WpqagrK+oFw0tTUZL344ovNDojXr19vNTQ0BGX9paWlNxwEf/rpp0FZN9qGcA0xLXfamJgYa+bMmdauXbtsCbq//OUv1gsvvGANHDiQHRUIUMsD4oEDB1ovvPCC9Ze//MX4upqamqxdu3ZZM2fOtGJiYjgIDnGEa4gqKSmxpk2b5t9pJVmpqanWmjVrrPLy8g4t2+v1WoWFhdbjjz9uxcbGNlvHhAkT2FGBADQ1NVl/+7d/22w/io2NtR5//HGrsLCww3/eKS8vt9asWWOlpqY2W8e0adM4CA5h3Fs4xH3yySfasGGD8vPzVVlZKUlyuVwaMmSI0tPT5Xa75Xa7NWbMGCUlJd3w816vV6WlpfJ4PP5/hw4d8i9Lku677z69//77zX4mHO+jCjjBsqxm7/tuuT/16NFDY8eO9e+r6enpSk1NbXUfq6qq0uHDh+XxeFRUVCSPx6OSkhL5XqZ79OihefPm6amnntKwYcPs3zi0G+EaJmpqarRlyxatW7dOhw8fvuFxl8ulHj16KCEhQYmJiWpsbFRtba2qq6tVV1d3w/O7du2quXPnavHixRo9erSOHz+uESNGSJKWLFniv7sLgFvLzs7W+vXrJUnHjx9XWlqaPvroI61fv16vvfZaq/cQT0hIULdu3ZSYmKjY2FjV1taqrq5OlZWVau0lecyYMcrOztacOXPUtWtX27cJHUe4hqELFy40O7L1eDw6c+bMTZ8fHx+vUaNG+Y+c3W630tLSFBcX1+x51x9J016B22vZWlu+nDY0NKi4uLjZmaMjR46ovr7+psscOHBgs5brdrvVr18/27YB9iBcI8TFixd16dIl1dXVqa6uTrGxsf4WO2jQoBuCtDW0VyAwrbXW22loaNDp06f9bbWxsVEJCQlKSEhQnz59dMcdd9g9NoKAcEUztFegbW7XWhHduPs6mjl27Jj/az75Bri5JUuW+L8+fvy4g5MgFNFccQPaK3BrtFbcDs0VN6C9ArdGa8Xt0FzRKtor0DpaK9qC5opW0V6B1tFa0RY0V9wU7RVojtaKtqK54qZor0BztFa0Fc0Vt0R7Ba6itSIQNFfcEu0VuIrWikDQXHFbtFdEO1orAkVzxW3RXhHtaK0IFM0VbUJ7RbSitaI9aK5oE9orohWtFe1Bc0Wb0V4RbWitaC+aK9qM9opoQ2tFe9FcERDaK6IFrRUdQXNFQGiviBa0VnQEzRUBo70i0tFa0VE0VwSM9opIR2tFR9Fc0S60V0QqWitMoLmiXWiviFS0VphAc0W70V4RaWitMIXminajvSLS0FphCs0VHUJ7RaSgtcIkmis6hPaKSEFrhUk0V3QY7RXhjtYK02iu6DDaK8IdrRWm0VxhBO0V4Y
rWCjvQXGEE7RXhitYKO9BcYQztFeGG1gq70FxhDO0V4YbWCrvQXGEU7RXhgtYKO9FcYRTtFeGC1go70VxhHO0VoY7WCrvRXGEc7RWhjtYKu9FcYQvaK0IVrRXBQHOFLWivCFW0VgQDzRW2ob0i1NBaESw0V9iG9opQQ2tFsNBcYSvaK0IFrRXBRHOFrWivCBW0VgQTzRW2o73CabRWBBvNFbajvcJptFYEG80VQUF7hVNorXACzRVBQXuFU2itcALNFUFDe0Ww0VrhFJorgob2imCjtcIpNFcEFe0VwUJrhZNorggq2iuChdYKJ9FcEXS0V9iN1gqn0VwRdLRX2I3WCqfRXOEI2ivsQmtFKKC5whG0V9iF1opQQHOFY2ivMI3WilBBc4VjaK8wjdaKUEFzhaNorzCF1opQQnOFo2ivMIXWilBCc4XjaK/oKForQg3NFY6jvaKjaK0INTRXhATaK9qL1opQRHNFSKC9or1orQhFNFeEDNorAkVrRaiiuSJk0F4RKForQhXNFSGF9oq2orUilNFcEVJor2grWitCGc0VIYf2ituhtSLU0VwRcmivuB1aK0IdzRUhifaKm6G1IhzQXBGSaK+4GVorwgHNFSGL9oqWaK0IFzRXhCzaK1qitSJc0FwR0miv8KG1IpzQXBHSaK/wobUinNBcEfJor6C1ItzQXBHyaK+gtSLc0FwRFmiv0YvWinBEc0VYoL1GL1orwhHNFWGD9hp9aK0IVzRXhA3aa/ShtSJc0VwRVmiv0YPWinBGc0VYub69Llu2zMFJYLelS5f6v6a1ItzQXBF2aK+Rj9aKcEdzRdihvUY+WivCHc0VYYn2GrlorYgENFeEJdpr5KK1IhLQXBG2aK+Rh9aKSEFzRdiivUYeWisiBc0VYY32GjlorYgkNFeENdpr5KC1IpLQXBH2aK/hj9aKSENzRdijvYY/WisiDc0VEYH2Gr5orYhENFdEBNpr+KK1IhLRXBExaK/hh9aKSEVzRcSgvYYfWisiFc0VEYX2Gj5orYhkNFdEFNpr+KC1IpLRXBFxaK+hj9aKSEdzRcShvYY+WisiHc0VEYn2GrporYgGNFdEJNpr6KK1IhrQXBGxaK+hh9aKaEFzRcSivYYeWiuiBc0VEY32GjporYgmNFdENNpr6KC1IprQXBHxaK/Oo7Ui2tBcEfFor86jtSLa0FwRFWivzqG1IhrRXBEVaK/OobUiGtFcETVor8FHa0W0orkiatBeg4/WimhFc0VUob0GD60V0YzmiqhCew0eWiuiGc0VUYf2aj9aK6IdzRVRh/ZqP1oroh3NFVGJ9mofWitAc0WUor3ah9YK0FwRxWiv5tFagatorohatFfzaK3AVTRXRDXaqzm0VuAamiuiGu3VHForcA3NFVGP9tpxtFagOZoroh7tteNorUBzNFdAtNeOoLUCN6K5AqK9dgStFbgRzRX4f7TXwNFagdbRXIH/R3sNHK0VaB3NFbgO7bXtaK3AzdFcgevQXtuO1grcHM0VaIH2enu0VuDWaK5AC7TX26O1ArdGcwVaQXu9OVorcHs0V6AVtNebo7UCt0dzBW6C9nojWivQNjRX4CZorzeitQJtQ3MFboH2eg2tFWg7mitwC7TXa2itQNvRXIHboL3SWoFA0VyB26C90lqBQNFcgTaI5vZKawUCR3MF2iCa2yutFQgczRVoo2hsr7RWoH1orkAbRWN7pbUC7UNzBQIQTe2V1gq0H80VCEA0tVdaK9B+NFcgQNHQXmmtQMfQXIEARUN7pbUCHUNzBdohktsrrRXoOJor0A6R3F5prUDH0VyBdorE9kprBcyguQLtFIntldYKmEFzBTogktorrRUwh+YKdEAktVdaK2AOzRXooEhor7RWwCyaK9BBkdBeaa2AWTRXwIBwbq+0VsA8mitgQDi3V1orYB7NFTAkHNsrrRWwB80VMCQc2yutFbAHzRUwKJzaK60VsA/NFTDoZu3VsizHw6vlDL
RWwD6EK2DQ8OHD/V+/9NJL8nq9evvttzVu3DgVFRU5OJn0xz/+UePGjdPbb78tr9erdevW+R9LS0tzcDIg8sQ6PQAQaY4dO6YRI0ZIkjp16uT/fkFBgf7mb/7GqbFUUFCgDz/8UA899FCz79NaAfNoroBBlmXps88+a/WxgoKCIE/TtvWfPn3a8VPWQKThgibAkOLiYj355JP68MMPW308Li5Oly5dUlJSUpAnk6qqqtS7d281Nja2+vjXvvY1bdq0idPDgCE0V8CQu+++WzU1NTd9vKGhQYWFhUGc6Jr9+/ffNFgl6auvvtLdd98dvIGACEe4AoZ07dpVO3bsuGUzderU8K3W2717d23fvl1du3YN4kRAZCNcAYOGDBmi/Pz8mz4eiuGan5+vIUOGBHEaIPIRroBhjz76qH74wx+2+tiJEyd06tSpoM5z8uRJlZSUtPrY8uXL9cgjjwR1HiAaEK6ADVatWqX777+/1cf27NkT1Flutr4HHnhA//qv/xrUWYBoQbgCNoiNjdXWrVvVv3//Gx4LhXAdMGCAtmzZothY3uoO2IG34gA2ev/99zV58uRmV+r27t1bFy5caHaDCbs0NTXpjjvu0Jdffun/XlxcnAoLCzV+/Hjb1w9EKw5bARvdd999+tnPftbsPsNffPGFDh06dNu7NfluSFFVVaW6ujpduXJFnTt3VkJCgpKSkpScnHzbDwbweDzNglWSfvaznxGsgM0IV8BmS5cu1cGDB7Vlyxb/91reCtGyLJWWlsrj8TT7V1lZedPl9ujRQ263u9m/1NTUZoHb8irhv//7v9eSJUsMbh2A1nBaGAiCmpoa3XvvvSouLpYkTZo0SYWFhTp79qw2btyojRs36vPPP7/h52JjY9WjRw8lJiaqc+fOunLlimpra1VZWdnqTSH69++vBQsWaOHCherfv78mTZqk9957T9LVDxX4wx/+wPtZgSAgXIEgKSkpUXp6uqqqqhQTE6MZM2bov/7rv9TU1CRJio+P18iRI+V2u5Weni632620tDTFxcXdsKyGhgYVFxerqKjI33KPHj2q+vp6SVc/MGDGjBl688035fV61b17d/3xj3/k/axAkBCuQBD9y7/8i1544YVm35s0aZIlyj8EAAAL7UlEQVSys7P18MMPKz4+vt3Lrq+v129/+1utX79e77777g3rff7559u9bACBIVyBILh8+bK+//3vKy8vT9LVWyV+85vf1OLFi5t9Bqwpx48f1/r16/XKK6/473e8YMECrVmzRt27dze+PgDNEa6AzQoKCjR//nz/R9EtW7ZML7zwQlBC7vLly/rnf/5nrV27VpKUnJysvLw8TZ061fZ1A9GMcAVsUl9fr6efflobN26UJKWmpmrTpk2aNGlS0Gd599139a1vfUtlZWWSpIULF2rt2rUdOg0N4OYIV8AG1dXVeuSRR7R3715JV9vqqlWrHL1St6amRs8995y/xWZmZuq3v/0tVw8DNiBcAcMqKir04IMP+t/2smPHjpA6DVtQUKBHH31UNTU1Gj9+vHbu3KmePXs6PRYQUQhXwKCvvvpKU6dO1f/8z/+od+/eevvtt/W1r33N6bFu8MEHH+jBBx/Ul19+qQkTJmj37t3q0qWL02MBEYNwBQy5cuWKZs6cqd27d6tnz57av3+/Ro0a5fRYN3XkyBHdf//9qqio0PTp0/XGG2+oc+fOTo8FRAQ+FQcw5Pnnn/c3wJ07d4Z0sErSqFGj9Pvf/15dunTRrl27tHLlSqdHAiIGzRUwoKioSOPGjVNTU5O2bdumxx57zOmR2mzbtm2aNWuWOnXqpD/84Q9KT093eiQg7NFcgQ6qr6/XvHnz1NTUpDlz5oRVsEpSVlaWZs+eraamJs2bN89/C0UA7Ue4Ah20cuVKFRcXq1+/fv63uYSbX/ziF+rXr5+Ki4tvuD0jgMBxWhjoAI/Ho3vvvVdNTU3asWOHHnnkEadHarcdO3boscceU6dOnfTBBx
/I7XY7PRIQtmiuQAc8//zzampq0uzZs8M6WCXp0Ucf1eOPP66mpiYubgI6iOYKtNPJkyeVkpIiy7L0ySefaOjQoU6P1GEnTpzQsGHDFBMTo7KyMt11111OjwSEJZor0E65ubmyLEsZGRkREaySNHToUE2ZMkVer1e5ublOjwOELcIVaIf6+nr/x8dlZ2c7PI1Zvu3Jy8vjymGgnQhXoB22b9+uixcvasCAAZoxY0aHl5eZmanU1FS5XC7/v9TUVK1YscL/nNzcXLndbvXq1cv/nF69esntdvs/7caEmTNnqn///rpw4YJ27NhhbLlANCFcgXbIz8+XdPWj22JjYzu8vD179qi0tFQZGRmSpNWrV6u0tFSrV6/2P2fhwoXyeDzat2+fJKlnz5768ssv5fF4lJKS0uEZfGJjY7Vo0SJJ17YTQGAIVyBAlmXpww8/lHS15ZnkC8lbfUqN77HevXsbXff1fG38ww8/FNc8AoEjXIEAlZaWqrKyUvHx8UpLS3N6HFukpaWpc+fOqqioMHrKGYgWhCsQII/HI0kaOXKk4uLiHJ7GHp07d9bIkSMlXdteAG1HuAIB8oVNpN/ByLd9hCsQOMIVCFC0hWtRUZHDkwDhh3AFAlRSUiJJGjFihMOT2Mu3fb7tBdB2HX8PARBlvvrqK0lSUlKSbetYvXq1Xn/99VYf++KLL2xb7/V821dbWxuU9QGRhHAFAlRXVydJSkxMtG0dK1as0MKFC1t9rKysTKmpqbat28e3fb7tBdB2nBYGAtTY2ChJ6tSpk8OT2Mt3cwzf9gJoO8IVCFBCQoKkyG90vtPBvu0F0HaEKxCgaAlX3/YRrkDgCFcgQH379pUknTlzxuFJ7OXbvj59+jg8CRB+CFcgQGPGjJEU+TdX8G3f2LFjHZ4ECD+EKxAgO+9c5HubTUVFxU2f43vM7rfkRMvNMgA7EK5AgNLT0yWZDVff57lu27ZN0tW34rjd7lY/z3XKlCmSroZsr169lJmZacvN9QlXoP1cFp8nBQSkurpa3bt3l2VZOnfunO68806nRzLu3Llz+uu//mu5XC5dvnxZ3bp1c3okIKzQXIEAdevWTUOHDpUkHTx40OFp7OHbrmHDhhGsQDsQrkA7TJ8+XZK0adMmhyexh2+7fNsJIDCcFgba4cSJExo2bJhcLpfKysp09913Oz2SMSdPnlRKSoosy9KJEyc0ZMgQp0cCwg7NFWiHoUOHKiMjQ5ZlKTc31+lxjMrJyZFlWcrMzCRYgXYiXIF2ys7OliTl5eWpvr7e4WnMqK+vV15enqRr2wcgcIQr0E4zZszQgAEDdPHiRW3dutXpcYzYunWrysvLNXDgQH3jG99wehwgbBGuQDvFxsZq6dKlkqTly5ervLzc4Yk6pry8XMuXL5ckLVmyxP+pOAACxwVNQAfU19fL7XaruLhYc+bM0ebNm50eqd3mzJmjrVu3avjw4SoqKlJ8fLzTIwFhi3AFOqioqEjjxo1TU1OTduzYoUceecTpkQK2Y8cOPfbYY+rUqZM++OAD7soEdBCnhYEOSk9P99+m8Kmnngq708Pl5eVavHixJOmZZ54hWAEDaK6AAdefHn7ooYf0u9/9TnFxcU6PdVsNDQ16+OGHtXPnTk4HAwbRXAED4uPj9corryghIUE7d+7Ut771LXm9XqfHuiWv16t58+Zp586dSkhIUH5+PsEKGEK4Aoa43W5t27ZNsbGxeu2117Ro0SI1NTU5PVarmpqatGjRIm3evFmxsbHavn07p4MBgwhXwKCvf/3revXVVxUTE6O8vDw98cQTamhocHqsZq5cuaK5c+cqLy9PMTExevXVV/XQQw85PRYQUQhXwLDZs2dr69atiouL09atWzV16lSdOnXK6bEkSadOndK0adP061//WnFxcfr1r3+t2bNnOz0WEHEIV8AGs2bN0ptvvqkuXbpo//79Gj58uP+evU6wLEs5OTkaPny49u/fry5duuitt95SVlaWI/
MAkY6rhQEbffrpp3ryySd14MABSVJGRoby8vJ01113BW2GU6dO6dvf/rb27dsnSZo4caI2bdqkwYMHB20GINrQXAEb3XPPPSosLNSLL76oxMRE7d27V8OHD9ePf/xjnT171tZ1nz17Vj/+8Y81fPhw7du3T4mJiXrxxRe1f/9+ghWwGc0VCJKWLbZTp056+OGHlZ2drQceeEAul6vD67AsS//93/+tdevW6Y033vBfrUxbBYKLcAWCyOv1atu2bfrlL3+pd9991//9oUOHas6cOUpPT5fb7dZf/dVftXmZ586dk8fjUVFRkbZs2aITJ074H5s8ebKys7OVlZWlmBhOVAHBQrgCDjl+/LjWr1+v//zP/1R1dXWzxwYMGCC3260xY8aoT58+SkxMVOfOnXXlyhXV1tbq0qVLOnz4sIqKivT55583+9mkpCT9wz/8gxYvXqy0tLRgbhKA/0e4Ag6rqqrSli1b9N5778nj8eiTTz4J6Kpil8ulYcOGye12a9KkSZo9e7aSkpJsnBjA7RCuQIiprq7W4cOH5fF4dOzYMVVVVamurk719fWKj49XQkKCkpKSNGLECH+77datm9NjA7gO4QoAgGFc4QAAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhhGuAAAYRrgCAGAY4QoAgGGEKwAAhv0fej2lyQEiGeIAAAAASUVORK5CYII=\n",
|
| 42 |
+
"text/plain": [
|
| 43 |
+
"<IPython.core.display.Image object>"
|
| 44 |
+
]
|
| 45 |
+
},
|
| 46 |
+
"execution_count": 4,
|
| 47 |
+
"metadata": {},
|
| 48 |
+
"output_type": "execute_result"
|
| 49 |
+
}
|
| 50 |
+
],
|
| 51 |
+
"source": [
|
| 52 |
+
"from IPython.display import Image\n",
|
| 53 |
+
"\n",
|
| 54 |
+
"Image(\"images/monty.png\")"
|
| 55 |
+
]
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"cell_type": "markdown",
|
| 59 |
+
"metadata": {},
|
| 60 |
+
"source": [
|
| 61 |
+
"with the following CPDs:\n",
|
| 62 |
+
"\n",
|
| 63 |
+
"<pre>\n",
|
| 64 |
+
"\n",
|
| 65 |
+
"P(C):\n",
|
| 66 |
+
"+----------+----------+-----------+-----------+\n",
|
| 67 |
+
"| C | 0 | 1 | 2 |\n",
|
| 68 |
+
"+----------+----------+-----------+-----------+\n",
|
| 69 |
+
"| | 0.33 | 0.33 | 0.33 |\n",
|
| 70 |
+
"+----------+----------+-----------+-----------+\n",
|
| 71 |
+
"\n",
|
| 72 |
+
"P(P):\n",
|
| 73 |
+
"+----------+----------+-----------+-----------+\n",
|
| 74 |
+
"| P | 0 | 1 | 2 |\n",
|
| 75 |
+
"+----------+----------+-----------+-----------+\n",
|
| 76 |
+
"| | 0.33 | 0.33 | 0.33 |\n",
|
| 77 |
+
"+----------+----------+-----------+-----------+\n",
|
| 78 |
+
"\n",
|
| 79 |
+
"P(H | P, C):\n",
|
| 80 |
+
"+------+------+------+------+------+------+------+------+------+------+\n",
|
| 81 |
+
"| C | 0 | 1 | 2 |\n",
|
| 82 |
+
"+------+------+------+------+------+------+------+------+------+------+\n",
|
| 83 |
+
"| P | 0 | 1 | 2 | 0 | 1 | 2 | 0 | 1 | 2 |\n",
|
| 84 |
+
"+------+------+------+------+------+------+------+------+------+------+\n",
|
| 85 |
+
"| H=0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 1 | 0.5 | \n",
|
| 86 |
+
"+------+------+------+------+------+------+------+------+------+------+\n",
|
| 87 |
+
"| H=1 | 0.5 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0.5 |\n",
|
| 88 |
+
"+------+------+------+------+------+------+------+------+------+------+\n",
|
| 89 |
+
"| H=2 | 0.5 | 1 | 0 | 1 | 0.5 | 0 | 0 | 0 | 0 |\n",
|
| 90 |
+
"+------+------+------+------+------+------+------+------+------+------+\n",
|
| 91 |
+
"</pre>\n",
|
| 92 |
+
"\n",
|
| 93 |
+
"Let's say that the contestant selected door 0 and the host opened door 2, we need to find the probability of the prize i.e. $P(P|H=2, C=0)$."
|
| 94 |
+
]
|
| 95 |
+
},
|
| 96 |
+
{
|
| 97 |
+
"cell_type": "code",
|
| 98 |
+
"execution_count": 5,
|
| 99 |
+
"metadata": {},
|
| 100 |
+
"outputs": [
|
| 101 |
+
{
|
| 102 |
+
"name": "stderr",
|
| 103 |
+
"output_type": "stream",
|
| 104 |
+
"text": [
|
| 105 |
+
"Finding Elimination Order: : : 0it [00:19, ?it/s]\n"
|
| 106 |
+
]
|
| 107 |
+
},
|
| 108 |
+
{
|
| 109 |
+
"data": {
|
| 110 |
+
"text/plain": [
|
| 111 |
+
"[<TabularCPD representing P(C:3) at 0x7f580a175310>,\n",
|
| 112 |
+
" <TabularCPD representing P(P:3) at 0x7f58128ad520>,\n",
|
| 113 |
+
" <TabularCPD representing P(H:3 | C:3, P:3) at 0x7f580a175340>]"
|
| 114 |
+
]
|
| 115 |
+
},
|
| 116 |
+
"execution_count": 5,
|
| 117 |
+
"metadata": {},
|
| 118 |
+
"output_type": "execute_result"
|
| 119 |
+
}
|
| 120 |
+
],
|
| 121 |
+
"source": [
|
| 122 |
+
"from pgmpy.models import BayesianNetwork\n",
|
| 123 |
+
"from pgmpy.factors.discrete import TabularCPD\n",
|
| 124 |
+
"\n",
|
| 125 |
+
"# Defining the network structure\n",
|
| 126 |
+
"model = BayesianNetwork([(\"C\", \"H\"), (\"P\", \"H\")])\n",
|
| 127 |
+
"\n",
|
| 128 |
+
"# Defining the CPDs:\n",
|
| 129 |
+
"cpd_c = TabularCPD(\"C\", 3, [[0.33], [0.33], [0.33]])\n",
|
| 130 |
+
"cpd_p = TabularCPD(\"P\", 3, [[0.33], [0.33], [0.33]])\n",
|
| 131 |
+
"cpd_h = TabularCPD(\n",
|
| 132 |
+
" \"H\",\n",
|
| 133 |
+
" 3,\n",
|
| 134 |
+
" [\n",
|
| 135 |
+
" [0, 0, 0, 0, 0.5, 1, 0, 1, 0.5],\n",
|
| 136 |
+
" [0.5, 0, 1, 0, 0, 0, 1, 0, 0.5],\n",
|
| 137 |
+
" [0.5, 1, 0, 1, 0.5, 0, 0, 0, 0],\n",
|
| 138 |
+
" ],\n",
|
| 139 |
+
" evidence=[\"C\", \"P\"],\n",
|
| 140 |
+
" evidence_card=[3, 3],\n",
|
| 141 |
+
")\n",
|
| 142 |
+
"\n",
|
| 143 |
+
"# Associating the CPDs with the network structure.\n",
|
| 144 |
+
"model.add_cpds(cpd_c, cpd_p, cpd_h)\n",
|
| 145 |
+
"\n",
|
| 146 |
+
"# Some other methods\n",
|
| 147 |
+
"model.get_cpds()"
|
| 148 |
+
]
|
| 149 |
+
},
|
| 150 |
+
{
|
| 151 |
+
"cell_type": "code",
|
| 152 |
+
"execution_count": 6,
|
| 153 |
+
"metadata": {},
|
| 154 |
+
"outputs": [
|
| 155 |
+
{
|
| 156 |
+
"data": {
|
| 157 |
+
"text/plain": [
|
| 158 |
+
"True"
|
| 159 |
+
]
|
| 160 |
+
},
|
| 161 |
+
"execution_count": 6,
|
| 162 |
+
"metadata": {},
|
| 163 |
+
"output_type": "execute_result"
|
| 164 |
+
}
|
| 165 |
+
],
|
| 166 |
+
"source": [
|
| 167 |
+
"# check_model check for the model structure and the associated CPD and returns True if everything is correct otherwise throws an exception\n",
|
| 168 |
+
"model.check_model()"
|
| 169 |
+
]
|
| 170 |
+
},
|
| 171 |
+
{
|
| 172 |
+
"cell_type": "code",
|
| 173 |
+
"execution_count": 7,
|
| 174 |
+
"metadata": {},
|
| 175 |
+
"outputs": [
|
| 176 |
+
{
|
| 177 |
+
"name": "stderr",
|
| 178 |
+
"output_type": "stream",
|
| 179 |
+
"text": [
|
| 180 |
+
"Finding Elimination Order: : : 0it [00:00, ?it/s]\n",
|
| 181 |
+
"0it [00:00, ?it/s]\u001b[A"
|
| 182 |
+
]
|
| 183 |
+
},
|
| 184 |
+
{
|
| 185 |
+
"name": "stdout",
|
| 186 |
+
"output_type": "stream",
|
| 187 |
+
"text": [
|
| 188 |
+
"+------+----------+\n",
|
| 189 |
+
"| P | phi(P) |\n",
|
| 190 |
+
"+======+==========+\n",
|
| 191 |
+
"| P(0) | 0.3333 |\n",
|
| 192 |
+
"+------+----------+\n",
|
| 193 |
+
"| P(1) | 0.6667 |\n",
|
| 194 |
+
"+------+----------+\n",
|
| 195 |
+
"| P(2) | 0.0000 |\n",
|
| 196 |
+
"+------+----------+\n"
|
| 197 |
+
]
|
| 198 |
+
},
|
| 199 |
+
{
|
| 200 |
+
"name": "stderr",
|
| 201 |
+
"output_type": "stream",
|
| 202 |
+
"text": [
|
| 203 |
+
"\n"
|
| 204 |
+
]
|
| 205 |
+
}
|
| 206 |
+
],
|
| 207 |
+
"source": [
|
| 208 |
+
"# Inferring the posterior probability\n",
|
| 209 |
+
"from pgmpy.inference import VariableElimination\n",
|
| 210 |
+
"\n",
|
| 211 |
+
"infer = VariableElimination(model)\n",
|
| 212 |
+
"posterior_p = infer.query([\"P\"], evidence={\"C\": 0, \"H\": 2})\n",
|
| 213 |
+
"print(posterior_p)"
|
| 214 |
+
]
|
| 215 |
+
},
|
| 216 |
+
{
|
| 217 |
+
"cell_type": "markdown",
|
| 218 |
+
"metadata": {},
|
| 219 |
+
"source": [
|
| 220 |
+
"We see that the posterior probability of having the prize behind door 1 is higher than that of door 0. Therefore, the contestant should switch doors."
|
| 221 |
+
]
|
| 222 |
+
}
|
| 223 |
+
],
|
| 224 |
+
"metadata": {
|
| 225 |
+
"anaconda-cloud": {},
|
| 226 |
+
"kernelspec": {
|
| 227 |
+
"display_name": "Python 3",
|
| 228 |
+
"language": "python",
|
| 229 |
+
"name": "python3"
|
| 230 |
+
},
|
| 231 |
+
"language_info": {
|
| 232 |
+
"codemirror_mode": {
|
| 233 |
+
"name": "ipython",
|
| 234 |
+
"version": 3
|
| 235 |
+
},
|
| 236 |
+
"file_extension": ".py",
|
| 237 |
+
"mimetype": "text/x-python",
|
| 238 |
+
"name": "python",
|
| 239 |
+
"nbconvert_exporter": "python",
|
| 240 |
+
"pygments_lexer": "ipython3",
|
| 241 |
+
"version": "3.8.10"
|
| 242 |
+
}
|
| 243 |
+
},
|
| 244 |
+
"nbformat": 4,
|
| 245 |
+
"nbformat_minor": 4
|
| 246 |
+
}
|
testbed/pgmpy__pgmpy/funding.json
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"version": "v1.0.0",
|
| 3 |
+
"entity": {
|
| 4 |
+
"type": "individual",
|
| 5 |
+
"role": "maintainer",
|
| 6 |
+
"name": "Ankur Ankan",
|
| 7 |
+
"email": "ankurankan@gmail.com",
|
| 8 |
+
"description": "I lead the development and maintain the pgmpy package.",
|
| 9 |
+
"webpageUrl": {
|
| 10 |
+
"url": "https://github.com/pgmpy/pgmpy"
|
| 11 |
+
}
|
| 12 |
+
},
|
| 13 |
+
"projects": [{
|
| 14 |
+
"guid": "pgmpy",
|
| 15 |
+
"name": "pgmpy",
|
| 16 |
+
"description": "pgmpy is a Python package for learning (Structure and Parameter), inference (Probabilistic and Causal), and simulations in Bayesian Networks.",
|
| 17 |
+
"webpageUrl": {
|
| 18 |
+
"url": "https://github.com/pgmpy/pgmpy"
|
| 19 |
+
},
|
| 20 |
+
"repositoryUrl": {
|
| 21 |
+
"url": "https://github.com/pgmpy/pgmpy"
|
| 22 |
+
},
|
| 23 |
+
"licenses": ["spdx:MIT"],
|
| 24 |
+
"tags": ["machine-learning", "causal-inference", "bayesian-networks", "statistics", "python"]
|
| 25 |
+
}],
|
| 26 |
+
|
| 27 |
+
"funding": {
|
| 28 |
+
"channels": [{
|
| 29 |
+
"guid": "github",
|
| 30 |
+
"type": "other",
|
| 31 |
+
"address": "https://github.com/sponsors/pgmpy",
|
| 32 |
+
"description": "Github Sponsor"
|
| 33 |
+
},
|
| 34 |
+
{
|
| 35 |
+
"guid": "bank",
|
| 36 |
+
"type": "bank",
|
| 37 |
+
"description": "Please reach out for bank details"
|
| 38 |
+
}
|
| 39 |
+
],
|
| 40 |
+
|
| 41 |
+
"plans": [{
|
| 42 |
+
"guid": "dev-part",
|
| 43 |
+
"status": "active",
|
| 44 |
+
"name": "Part-time Developer",
|
| 45 |
+
"description": "This would go towards funding a part-time developer",
|
| 46 |
+
"amount": 25000,
|
| 47 |
+
"currency": "USD",
|
| 48 |
+
"frequency": "yearly",
|
| 49 |
+
"channels": ["github", "bank"]
|
| 50 |
+
}]
|
| 51 |
+
}
|
| 52 |
+
}
|
testbed/pgmpy__pgmpy/meta.yaml
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{% set name = "pgmpy" %}
|
| 2 |
+
{% set version = "0.1.26" %}
|
| 3 |
+
|
| 4 |
+
package:
|
| 5 |
+
name: "{{ name|lower }}"
|
| 6 |
+
version: "{{ version }}"
|
| 7 |
+
|
| 8 |
+
source:
|
| 9 |
+
url: https://pypi.io/packages/source/{{ name[0] }}/{{ name }}/{{ name }}-{{ version }}.tar.gz
|
| 10 |
+
#sha256: 668a34220342f211d4112c48b64ad696a3592b4758f9e3aac3553f968ed231b7
|
| 11 |
+
|
| 12 |
+
build:
|
| 13 |
+
number: 0
|
| 14 |
+
script: "{{ PYTHON }} -m pip install . --ignore-installed -vv"
|
| 15 |
+
|
| 16 |
+
requirements:
|
| 17 |
+
host:
|
| 18 |
+
- pip
|
| 19 |
+
- python
|
| 20 |
+
- pytorch
|
| 21 |
+
run:
|
| 22 |
+
- python
|
| 23 |
+
- networkx
|
| 24 |
+
- numpy
|
| 25 |
+
- scipy
|
| 26 |
+
- pandas
|
| 27 |
+
- pyparsing
|
| 28 |
+
- pytorch
|
| 29 |
+
- statsmodels
|
| 30 |
+
- tqdm
|
| 31 |
+
- joblib
|
| 32 |
+
- opt_einsum
|
| 33 |
+
|
| 34 |
+
test:
|
| 35 |
+
imports:
|
| 36 |
+
- pgmpy
|
| 37 |
+
|
| 38 |
+
about:
|
| 39 |
+
home: http://pgmpy.org/
|
| 40 |
+
license: MIT
|
| 41 |
+
license_family: MIT
|
| 42 |
+
summary: Python Library for Probabilistic Graphical Models
|
testbed/pgmpy__pgmpy/pgmpy/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .global_vars import config
|
| 2 |
+
|
| 3 |
+
__all__ = ["config", "logger"]
|
| 4 |
+
__version__ = "0.1.26"
|
testbed/pgmpy__pgmpy/pgmpy/base/DAG.py
ADDED
|
@@ -0,0 +1,1319 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
|
| 3 |
+
import itertools
|
| 4 |
+
|
| 5 |
+
import networkx as nx
|
| 6 |
+
import numpy as np
|
| 7 |
+
import pandas as pd
|
| 8 |
+
|
| 9 |
+
from pgmpy.base import UndirectedGraph
|
| 10 |
+
from pgmpy.global_vars import logger
|
| 11 |
+
from pgmpy.independencies import Independencies
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class DAG(nx.DiGraph):
|
| 15 |
+
"""
|
| 16 |
+
Base class for all Directed Graphical Models.
|
| 17 |
+
|
| 18 |
+
Each node in the graph can represent either a random variable, `Factor`,
|
| 19 |
+
or a cluster of random variables. Edges in the graph represent the
|
| 20 |
+
dependencies between these.
|
| 21 |
+
|
| 22 |
+
Parameters
|
| 23 |
+
----------
|
| 24 |
+
data: input graph
|
| 25 |
+
Data to initialize graph. If data=None (default) an empty graph is
|
| 26 |
+
created. The data can be an edge list or any Networkx graph object.
|
| 27 |
+
|
| 28 |
+
Examples
|
| 29 |
+
--------
|
| 30 |
+
Create an empty DAG with no nodes and no edges
|
| 31 |
+
|
| 32 |
+
>>> from pgmpy.base import DAG
|
| 33 |
+
>>> G = DAG()
|
| 34 |
+
|
| 35 |
+
G can be grown in several ways:
|
| 36 |
+
|
| 37 |
+
**Nodes:**
|
| 38 |
+
|
| 39 |
+
Add one node at a time:
|
| 40 |
+
|
| 41 |
+
>>> G.add_node(node='a')
|
| 42 |
+
|
| 43 |
+
Add the nodes from any container (a list, set or tuple or the nodes
|
| 44 |
+
from another graph).
|
| 45 |
+
|
| 46 |
+
>>> G.add_nodes_from(nodes=['a', 'b'])
|
| 47 |
+
|
| 48 |
+
**Edges:**
|
| 49 |
+
|
| 50 |
+
G can also be grown by adding edges.
|
| 51 |
+
|
| 52 |
+
Add one edge,
|
| 53 |
+
|
| 54 |
+
>>> G.add_edge(u='a', v='b')
|
| 55 |
+
|
| 56 |
+
a list of edges,
|
| 57 |
+
|
| 58 |
+
>>> G.add_edges_from(ebunch=[('a', 'b'), ('b', 'c')])
|
| 59 |
+
|
| 60 |
+
If some edges connect nodes not yet in the model, the nodes
|
| 61 |
+
are added automatically. There are no errors when adding
|
| 62 |
+
nodes or edges that already exist.
|
| 63 |
+
|
| 64 |
+
**Shortcuts:**
|
| 65 |
+
|
| 66 |
+
Many common graph features allow python syntax to speed reporting.
|
| 67 |
+
|
| 68 |
+
>>> 'a' in G # check if node in graph
|
| 69 |
+
True
|
| 70 |
+
>>> len(G) # number of nodes in graph
|
| 71 |
+
3
|
| 72 |
+
"""
|
| 73 |
+
|
| 74 |
+
def __init__(self, ebunch=None, latents=None):
    """
    Initialize a DAG from an optional edge list and a set of latent variables.

    Parameters
    ----------
    ebunch: input graph (default: None)
        Data to initialize the graph. If ebunch=None (default) an empty
        graph is created. The data can be an edge list or any networkx
        graph object.

    latents: iterable of nodes (default: None)
        Variables to mark as latent; stored on the instance as
        ``self.latents``. ``None`` means no latent variables.

    Raises
    ------
    ValueError
        If the initialized graph contains a directed cycle (i.e. it is
        not a valid DAG). The message lists the edges of one such cycle.
    """
    super(DAG, self).__init__(ebunch)
    # Copy into a fresh set so callers' containers are never aliased.
    # A `None` sentinel replaces the original mutable default argument.
    self.latents = set() if latents is None else set(latents)

    cycles = []
    try:
        cycles = list(nx.find_cycle(self))
    except nx.NetworkXNoCycle:
        # No cycle exists: the graph is a valid DAG.
        pass
    else:
        # `nx.find_cycle` returned without raising, so a cycle was found.
        out_str = "Cycles are not allowed in a DAG."
        out_str += "\nEdges indicating the path taken for a loop: "
        out_str += "".join([f"({u},{v}) " for (u, v) in cycles])
        raise ValueError(out_str)
|
| 87 |
+
|
| 88 |
+
def add_node(self, node, weight=None, latent=False):
    """
    Adds a single node to the Graph.

    Parameters
    ----------
    node: str, int, or any hashable python object.
        The node to add to the graph.

    weight: int, float
        The weight of the node.

    latent: boolean (default: False)
        Specifies whether the variable is latent or not.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> G = DAG()
    >>> G.add_node(node='A')
    >>> sorted(G.nodes())
    ['A']

    Adding a node with some weight.
    >>> G.add_node(node='B', weight=0.3)

    The weight of these nodes can be accessed as:
    >>> G.nodes['B']
    {'weight': 0.3}
    >>> G.nodes['A']
    {'weight': None}
    """

    # Check for networkx 2.0 syntax
    if isinstance(node, tuple) and len(node) == 2 and isinstance(node[1], dict):
        # Node passed as a (name, attribute-dict) pair.
        node, attrs = node
        # NOTE(review): `attrs` is built in both branches below but is
        # never passed to networkx — the `super().add_node` call at the
        # bottom only forwards `weight`. Any attributes supplied via the
        # tuple form (including its own 'weight') are therefore silently
        # discarded, and this branch overwrites the tuple's weight with
        # the (typically None) `weight` argument. Confirm whether that
        # is intended before relying on tuple-style attributes.
        if attrs.get("weight", None) is not None:
            attrs["weight"] = weight
    else:
        attrs = {"weight": weight}

    # Track latent variables on the graph itself, not as node attributes.
    if latent:
        self.latents.add(node)

    super(DAG, self).add_node(node, weight=weight)
|
| 133 |
+
|
| 134 |
+
def add_nodes_from(self, nodes, weights=None, latent=False):
    """
    Add multiple nodes to the Graph.

    **The behaviour of adding weights is different than in networkx.

    Parameters
    ----------
    nodes: iterable container
        A container of nodes (list, dict, set, or any hashable python
        object).

    weights: list, tuple (default=None)
        A container of weights (int, float). The weight value at index i
        is associated with the variable at index i.

    latent: list, tuple (default=False)
        A container of boolean. The value at index i tells whether the
        node at index i is latent or not. A single boolean applies to
        every node.

    Raises
    ------
    ValueError
        If `weights` is given but its length differs from `nodes`.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> G = DAG()
    >>> G.add_nodes_from(nodes=['A', 'B', 'C'])
    >>> G.nodes()
    NodeView(('A', 'B', 'C'))

    Adding nodes with weights:
    >>> G.add_nodes_from(nodes=['D', 'E'], weights=[0.3, 0.6])
    >>> G.nodes['D']
    {'weight': 0.3}
    >>> G.nodes['E']
    {'weight': 0.6}
    >>> G.nodes['A']
    {'weight': None}
    """
    nodes = list(nodes)

    # Broadcast a single boolean flag over all nodes.
    if isinstance(latent, bool):
        latent = [latent] * len(nodes)

    if weights:
        if len(nodes) != len(weights):
            # Fix: the original message lost the space between "weights"
            # and "should" through implicit string concatenation.
            raise ValueError(
                "The number of elements in nodes and weights should be equal."
            )
        for index in range(len(nodes)):
            self.add_node(
                node=nodes[index], weight=weights[index], latent=latent[index]
            )
    else:
        for index in range(len(nodes)):
            self.add_node(node=nodes[index], latent=latent[index])
|
| 188 |
+
|
| 189 |
+
def add_edge(self, u, v, weight=None):
    """
    Add a directed edge from u to v.

    Endpoints that are not yet part of the graph are created
    automatically.

    Parameters
    ----------
    u, v : nodes
        Nodes can be any hashable Python object.

    weight: int, float (default=None)
        The weight of the edge.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> G = DAG()
    >>> G.add_nodes_from(nodes=['Alice', 'Bob', 'Charles'])
    >>> G.add_edge(u='Alice', v='Bob')
    >>> G.nodes()
    NodeView(('Alice', 'Bob', 'Charles'))
    >>> G.edges()
    OutEdgeView([('Alice', 'Bob')])

    When the node is not already present in the graph:
    >>> G.add_edge(u='Alice', v='Ankur')
    >>> G.nodes()
    NodeView(('Alice', 'Ankur', 'Bob', 'Charles'))
    >>> G.edges()
    OutEdgeView([('Alice', 'Bob'), ('Alice', 'Ankur')])

    Adding edges with weight:
    >>> G.add_edge('Ankur', 'Maria', weight=0.1)
    >>> G.edge['Ankur']['Maria']
    {'weight': 0.1}
    """
    # Delegate to networkx; the weight is stored as an edge attribute.
    super().add_edge(u, v, weight=weight)
|
| 228 |
+
|
| 229 |
+
def add_edges_from(self, ebunch, weights=None):
    """
    Add all the edges in ebunch.

    If nodes referred in the ebunch are not already present, they
    will be automatically added. Node names can be any hashable python
    object.

    **The behavior of adding weights is different than networkx.

    Parameters
    ----------
    ebunch : container of edges
        Each edge given in the container will be added to the graph.
        The edges must be given as 2-tuples (u, v).

    weights: list, tuple (default=None)
        A container of weights (int, float). The weight value at index i
        is associated with the edge at index i.

    Raises
    ------
    ValueError
        If `weights` is given but its length differs from `ebunch`.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> G = DAG()
    >>> G.add_nodes_from(nodes=['Alice', 'Bob', 'Charles'])
    >>> G.add_edges_from(ebunch=[('Alice', 'Bob'), ('Bob', 'Charles')])
    >>> G.nodes()
    NodeView(('Alice', 'Bob', 'Charles'))
    >>> G.edges()
    OutEdgeView([('Alice', 'Bob'), ('Bob', 'Charles')])

    When the node is not already in the model:
    >>> G.add_edges_from(ebunch=[('Alice', 'Ankur')])
    >>> G.nodes()
    NodeView(('Alice', 'Bob', 'Charles', 'Ankur'))
    >>> G.edges()
    OutEdgeView([('Alice', 'Bob'), ('Bob', 'Charles'), ('Alice', 'Ankur')])

    Adding edges with weights:
    >>> G.add_edges_from([('Ankur', 'Maria'), ('Maria', 'Mason')],
    ...                  weights=[0.3, 0.5])
    >>> G.edge['Ankur']['Maria']
    {'weight': 0.3}
    >>> G.edge['Maria']['Mason']
    {'weight': 0.5}
    """
    ebunch = list(ebunch)

    if weights:
        if len(ebunch) != len(weights):
            # Fix: the original message lost the space between "weights"
            # and "should" through implicit string concatenation.
            raise ValueError(
                "The number of elements in ebunch and weights should be equal"
            )
        for index in range(len(ebunch)):
            self.add_edge(ebunch[index][0], ebunch[index][1], weight=weights[index])
    else:
        for edge in ebunch:
            self.add_edge(edge[0], edge[1])
|
| 287 |
+
|
| 288 |
+
def get_parents(self, node):
    """
    Return a list of the parents of `node`.

    Raises an error if `node` is not present in the graph.

    Parameters
    ----------
    node: string, int or any hashable python object.
        The node whose parents would be returned.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> G = DAG(ebunch=[('diff', 'grade'), ('intel', 'grade')])
    >>> G.get_parents(node='grade')
    ['diff', 'intel']
    """
    # Parents of a node are exactly its predecessors in the directed graph.
    return [parent for parent in self.predecessors(node)]
def moralize(self):
    """
    Removes all the immoralities in the DAG and creates a moral
    graph (UndirectedGraph).

    A v-structure X->Z<-Y is an immorality if there is no directed edge
    between X and Y.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> G = DAG(ebunch=[('diff', 'grade'), ('intel', 'grade')])
    >>> moral_graph = G.moralize()
    >>> moral_graph.edges()
    EdgeView([('intel', 'grade'), ('intel', 'diff'), ('grade', 'diff')])
    """
    moral_graph = UndirectedGraph()
    moral_graph.add_nodes_from(self.nodes())
    # Start from the skeleton (the undirected version) of the DAG.
    moral_graph.add_edges_from(self.to_undirected().edges())

    # "Marry" every pair of parents that share a common child.
    for child in self.nodes():
        parent_pairs = itertools.combinations(self.get_parents(child), 2)
        moral_graph.add_edges_from(parent_pairs)

    return moral_graph
def get_leaves(self):
    """
    Returns a list of leaves of the graph.

    A leaf is any node with no outgoing edges (out-degree zero).

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> graph = DAG([('A', 'B'), ('B', 'C'), ('B', 'D')])
    >>> graph.get_leaves()
    ['C', 'D']
    """
    leaves = []
    for node, out_degree in self.out_degree_iter():
        if out_degree == 0:
            leaves.append(node)
    return leaves
def out_degree_iter(self, nbunch=None, weight=None):
    # Compatibility shim: networkx 1.x exposed `out_degree_iter` directly,
    # while networkx 2.x+ returns a view from `out_degree` that we wrap
    # in an iterator to keep the old calling convention.
    if nx.__version__.startswith("1"):
        return super(DAG, self).out_degree_iter(nbunch, weight)
    return iter(self.out_degree(nbunch, weight))
def in_degree_iter(self, nbunch=None, weight=None):
    # Compatibility shim: networkx 1.x exposed `in_degree_iter` directly,
    # while networkx 2.x+ returns a view from `in_degree` that we wrap
    # in an iterator to keep the old calling convention.
    if nx.__version__.startswith("1"):
        return super(DAG, self).in_degree_iter(nbunch, weight)
    return iter(self.in_degree(nbunch, weight))
def get_roots(self):
    """
    Returns a list of roots of the graph.

    A root is any node with no incoming edges (in-degree zero).

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> graph = DAG([('A', 'B'), ('B', 'C'), ('B', 'D'), ('E', 'B')])
    >>> graph.get_roots()
    ['A', 'E']
    """
    in_degrees = dict(self.in_degree())
    return [node for node, degree in in_degrees.items() if degree == 0]
def get_children(self, node):
    """
    Return a list of the children of `node`.

    Throws an error if the node is not present in the graph.

    Parameters
    ----------
    node: string, int or any hashable python object.
        The node whose children would be returned.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> g = DAG(ebunch=[('A', 'B'), ('C', 'B'), ('B', 'D'),
    ...                 ('B', 'E'), ('B', 'F'), ('E', 'G')])
    >>> g.get_children(node='B')
    ['D', 'E', 'F']
    """
    # Children of a node are exactly its successors in the directed graph.
    return [child for child in self.successors(node)]
def get_independencies(self, latex=False, include_latents=False):
    """
    Computes independencies in the DAG, by checking minimal d-seperation.

    Parameters
    ----------
    latex: boolean
        If latex=True then latex string of the independence assertion
        would be created.

    include_latents: boolean
        If True, includes latent variables in the independencies. Otherwise,
        only generates independencies on observed variables.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> chain = DAG([('X', 'Y'), ('Y', 'Z')])
    >>> chain.get_independencies()
    (X \u27C2 Z | Y)
    """
    nodes = set(self.nodes())
    if not include_latents:
        nodes = nodes - self.latents

    independencies = Independencies()
    for x, y in itertools.combinations(nodes, 2):
        # Adjacent variables can never be d-separated; skip them.
        if self.has_edge(x, y) or self.has_edge(y, x):
            continue
        minimal_separator = self.minimal_dseparator(
            start=x, end=y, include_latents=include_latents
        )
        if minimal_separator is not None:
            independencies.add_assertions([x, y, minimal_separator])

    # Drop assertions implied by others to keep the result minimal.
    independencies = independencies.reduce()

    return independencies.latex_string() if latex else independencies
def local_independencies(self, variables):
    """
    Returns an instance of Independencies containing the local independencies
    of each of the variables.

    Parameters
    ----------
    variables: str or array like
        variables whose local independencies are to be found.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> student = DAG()
    >>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'),
    ...                         ('grade', 'letter'), ('intel', 'SAT')])
    >>> ind = student.local_independencies('grade')
    >>> ind
    (grade \u27C2 SAT | diff, intel)
    """
    independencies = Independencies()
    variables = variables if isinstance(variables, (list, tuple)) else [variables]
    for variable in variables:
        # Non-descendants: every node except the variable itself and
        # everything reachable from it along directed paths.
        non_descendents = (
            set(self.nodes())
            - {variable}
            - set(nx.dfs_preorder_nodes(self, variable))
        )
        parents = set(self.get_parents(variable))
        # Local Markov property: X _|_ (NonDescendants(X) - Pa(X)) | Pa(X)
        if non_descendents - parents:
            independencies.add_assertions(
                [variable, non_descendents - parents, parents]
            )
    return independencies
def is_iequivalent(self, model):
    """
    Checks whether the given model is I-equivalent

    Two graphs G1 and G2 are said to be I-equivalent if they have same skeleton
    and have same set of immoralities.

    Parameters
    ----------
    model : A DAG object, for which you want to check I-equivalence

    Returns
    --------
    I-equivalence: boolean
        True if both are I-equivalent, False otherwise

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> G = DAG()
    >>> G.add_edges_from([('V', 'W'), ('W', 'X'),
    ...                   ('X', 'Y'), ('Z', 'Y')])
    >>> G1 = DAG()
    >>> G1.add_edges_from([('W', 'V'), ('X', 'W'),
    ...                    ('X', 'Y'), ('Z', 'Y')])
    >>> G.is_iequivalent(G1)
    True

    """
    if not isinstance(model, DAG):
        raise TypeError(
            f"Model must be an instance of DAG. Got type: {type(model)}"
        )

    # I-equivalence <=> identical skeleton AND identical immoralities.
    same_skeleton = self.to_undirected().edges() == model.to_undirected().edges()
    return same_skeleton and (self.get_immoralities() == model.get_immoralities())
def get_immoralities(self):
    """
    Finds all the immoralities in the model.

    A v-structure X -> Z <- Y is an immorality if there is no direct edge
    between X and Y.

    Returns
    -------
    Immoralities: dict
        A dict mapping every node to the list of sorted parent pairs that
        form an immorality at that node. Nodes with no immorality map to
        an empty list.

    Examples
    ---------
    >>> from pgmpy.base import DAG
    >>> student = DAG()
    >>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'),
    ...                         ('intel', 'SAT'), ('grade', 'letter')])
    >>> student.get_immoralities()['grade']
    [('diff', 'intel')]
    """
    immoralities = dict()
    for node in self.nodes():
        parent_pairs = []
        # Every unordered pair of parents that is not directly connected
        # (in either direction) forms an immorality at `node`.
        for u, v in itertools.combinations(self.predecessors(node), 2):
            if not self.has_edge(u, v) and not self.has_edge(v, u):
                parent_pairs.append(tuple(sorted((u, v))))
        immoralities[node] = parent_pairs
    return immoralities
def is_dconnected(self, start, end, observed=None, include_latents=False):
    """
    Returns True if there is an active trail (i.e. d-connection) between
    `start` and `end` node given that `observed` is observed.

    Parameters
    ----------
    start, end : int, str, any hashable python object.
        The nodes in the DAG between which to check the d-connection/active trail.

    observed : list, array-like (optional)
        If given the active trail would be computed assuming these nodes to
        be observed.

    include_latents: boolean (default: False)
        If true, latent variables are return as part of the active trail.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> student = DAG()
    >>> student.add_nodes_from(['diff', 'intel', 'grades', 'letter', 'sat'])
    >>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades'), ('grades', 'letter'),
    ...                         ('intel', 'sat')])
    >>> student.is_dconnected('diff', 'intel')
    False
    >>> student.is_dconnected('grades', 'sat')
    True
    """
    # `start` and `end` are d-connected iff `end` is reachable along an
    # active trail starting from `start`.
    reachable = self.active_trail_nodes(
        variables=start, observed=observed, include_latents=include_latents
    )[start]
    return end in reachable
def minimal_dseparator(self, start, end, include_latents=False):
    """
    Finds the minimal d-separating set for `start` and `end`.

    Parameters
    ----------
    start: node
        The first node.

    end: node
        The second node.

    include_latents: boolean (default: False)
        If true, latent variables are consider for minimal d-seperator.

    Examples
    --------
    >>> dag = DAG([('A', 'B'), ('B', 'C')])
    >>> dag.minimal_dseparator(start='A', end='C')
    {'B'}

    References
    ----------
    [1] Algorithm 4, Page 10: Tian, Jin, Azaria Paz, and Judea Pearl. Finding minimal d-separators. Computer Science Department, University of California, 1998.
    """
    # Adjacent nodes cannot be d-separated by any set.
    if (end in self.neighbors(start)) or (start in self.neighbors(end)):
        raise ValueError(
            "No possible separators because start and end are adjacent"
        )
    # Restrict attention to the ancestral graph of {start, end};
    # d-separation between them only depends on their ancestors.
    an_graph = self.get_ancestral_graph([start, end])
    # Initial candidate: the union of both endpoints' parents.
    separator = set(
        itertools.chain(self.predecessors(start), self.predecessors(end))
    )

    if not include_latents:
        # If any of the parents were latents, take the latent's parent
        # instead; repeat until no latent remains in the candidate set.
        while len(separator.intersection(self.latents)) != 0:
            separator_copy = separator.copy()
            for u in separator:
                if u in self.latents:
                    separator_copy.remove(u)
                    separator_copy.update(set(self.predecessors(u)))
            separator = separator_copy

        # Remove the start and end nodes in case it reaches there while removing latents.
        separator.difference_update({start, end})

    # If the initial set is not able to d-separate, no d-separator is possible.
    if an_graph.is_dconnected(start, end, observed=separator):
        return None

    # Go through the separator set, remove one element and check if it remains
    # a dseparating set.
    minimal_separator = separator.copy()

    for u in separator:
        if not an_graph.is_dconnected(start, end, observed=minimal_separator - {u}):
            minimal_separator.remove(u)

    return minimal_separator
def get_markov_blanket(self, node):
    """
    Returns a markov blanket for a random variable. In the case
    of Bayesian Networks, the markov blanket is the set of
    node's parents, its children and its children's other parents.

    Returns
    -------
    Markov Blanket: list
        List of nodes in the markov blanket of `node`.

    Parameters
    ----------
    node: string, int or any hashable python object.
        The node whose markov blanket would be returned.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> G = DAG([('x', 'y'), ('z', 'y'), ('y', 'w'), ('y', 'v'), ('u', 'w'),
    ...          ('s', 'v'), ('w', 't'), ('w', 'm'), ('v', 'n'), ('v', 'q')])
    >>> G.get_markov_blanket('y')
    ['s', 'w', 'x', 'u', 'z', 'v']
    """
    children = self.get_children(node)
    # The blanket is: parents + children + the children's other parents.
    blanket = set(children)
    blanket.update(self.get_parents(node))
    for child in children:
        blanket.update(self.get_parents(child))
    # The node itself is never part of its own blanket.
    blanket.discard(node)
    return list(blanket)
def active_trail_nodes(self, variables, observed=None, include_latents=False):
    """
    Returns a dictionary with the given variables as keys and all the nodes reachable
    from that respective variable as values.

    Parameters
    ----------
    variables: str or array like
        variables whose active trails are to be found.

    observed : List of nodes (optional)
        If given the active trails would be computed assuming these nodes to be
        observed.

    include_latents: boolean (default: False)
        Whether to include the latent variables in the returned active trail nodes.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> student = DAG()
    >>> student.add_nodes_from(['diff', 'intel', 'grades'])
    >>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades')])
    >>> student.active_trail_nodes('diff')
    {'diff': {'diff', 'grades'}}
    >>> student.active_trail_nodes(['diff', 'intel'], observed='grades')
    {'diff': {'diff', 'intel'}, 'intel': {'diff', 'intel'}}

    References
    ----------
    Details of the algorithm can be found in 'Probabilistic Graphical Model
    Principles and Techniques' - Koller and Friedman
    Page 75 Algorithm 3.1
    """
    # Normalize `observed` into a list (possibly empty).
    if observed:
        if isinstance(observed, set):
            observed = list(observed)

        observed_list = (
            observed if isinstance(observed, (list, tuple)) else [observed]
        )
    else:
        observed_list = []
    # Ancestors of observed nodes activate colliders (v-structures).
    ancestors_list = self._get_ancestors_of(observed_list)

    # Direction of flow of information
    # up -> from parent to child
    # down -> from child to parent

    active_trails = {}
    for start in variables if isinstance(variables, list) else [variables]:
        # Breadth-style traversal over (node, direction) pairs, following
        # Koller & Friedman's Reachable procedure (Algorithm 3.1).
        visit_list = set()
        visit_list.add((start, "up"))
        traversed_list = set()
        active_nodes = set()
        while visit_list:
            node, direction = visit_list.pop()
            if (node, direction) not in traversed_list:
                # Unobserved nodes reached by the trail are active.
                if node not in observed_list:
                    active_nodes.add(node)
                traversed_list.add((node, direction))
                if direction == "up" and node not in observed_list:
                    # Trail may continue through parents and children.
                    for parent in self.predecessors(node):
                        visit_list.add((parent, "up"))
                    for child in self.successors(node):
                        visit_list.add((child, "down"))
                elif direction == "down":
                    if node not in observed_list:
                        for child in self.successors(node):
                            visit_list.add((child, "down"))
                    # A collider is active when the node (or a descendant)
                    # is observed, i.e. when it is an ancestor of an
                    # observed node.
                    if node in ancestors_list:
                        for parent in self.predecessors(node):
                            visit_list.add((parent, "up"))
        if include_latents:
            active_trails[start] = active_nodes
        else:
            active_trails[start] = active_nodes - self.latents

    return active_trails
def _get_ancestors_of(self, nodes):
|
| 758 |
+
"""
|
| 759 |
+
Returns a dictionary of all ancestors of all the observed nodes including the
|
| 760 |
+
node itself.
|
| 761 |
+
|
| 762 |
+
Parameters
|
| 763 |
+
----------
|
| 764 |
+
nodes: string, list-type
|
| 765 |
+
name of all the observed nodes
|
| 766 |
+
|
| 767 |
+
Examples
|
| 768 |
+
--------
|
| 769 |
+
>>> from pgmpy.base import DAG
|
| 770 |
+
>>> model = DAG([('D', 'G'), ('I', 'G'), ('G', 'L'),
|
| 771 |
+
... ('I', 'L')])
|
| 772 |
+
>>> model._get_ancestors_of('G')
|
| 773 |
+
{'D', 'G', 'I'}
|
| 774 |
+
>>> model._get_ancestors_of(['G', 'I'])
|
| 775 |
+
{'D', 'G', 'I'}
|
| 776 |
+
"""
|
| 777 |
+
if not isinstance(nodes, (list, tuple)):
|
| 778 |
+
nodes = [nodes]
|
| 779 |
+
|
| 780 |
+
for node in nodes:
|
| 781 |
+
if node not in self.nodes():
|
| 782 |
+
raise ValueError(f"Node {node} not in graph")
|
| 783 |
+
|
| 784 |
+
ancestors_list = set()
|
| 785 |
+
nodes_list = set(nodes)
|
| 786 |
+
while nodes_list:
|
| 787 |
+
node = nodes_list.pop()
|
| 788 |
+
if node not in ancestors_list:
|
| 789 |
+
nodes_list.update(self.predecessors(node))
|
| 790 |
+
ancestors_list.add(node)
|
| 791 |
+
return ancestors_list
|
| 792 |
+
|
| 793 |
+
# TODO: Commented out till the method is implemented.
|
| 794 |
+
# def to_pdag(self):
|
| 795 |
+
# """
|
| 796 |
+
# Returns the PDAG (the equivalence class of DAG; also known as CPDAG) of the DAG.
|
| 797 |
+
#
|
| 798 |
+
# Returns
|
| 799 |
+
# -------
|
| 800 |
+
# Partially oriented DAG: pgmpy.base.PDAG
|
| 801 |
+
# An instance of pgmpy.base.PDAG.
|
| 802 |
+
#
|
| 803 |
+
# Examples
|
| 804 |
+
# --------
|
| 805 |
+
#
|
| 806 |
+
# """
|
| 807 |
+
# pass
|
| 808 |
+
|
| 809 |
+
def do(self, nodes, inplace=False):
    """
    Applies the do operator to the graph and returns a new DAG with the
    transformed graph.

    The do-operator, do(X = x) has the effect of removing all edges from
    the parents of X and setting X to the given value x.

    Parameters
    ----------
    nodes : list, array-like
        The names of the nodes to apply the do-operator for.

    inplace: boolean (default: False)
        If inplace=True, makes the changes to the current object,
        otherwise returns a new instance.

    Returns
    -------
    Modified DAG: pgmpy.base.DAG
        A new instance of DAG modified by the do-operator

    Examples
    --------
    Initialize a DAG
    >>> graph = DAG()
    >>> graph.add_edges_from([('X', 'A'),
    ...                       ('A', 'Y'),
    ...                       ('A', 'B')])
    >>> # Applying the do-operator will return a new DAG with the desired structure.
    >>> graph_do_A = graph.do('A')
    >>> # Which we can verify is missing the edges we would expect.
    >>> graph_do_A.edges
    OutEdgeView([('A', 'B'), ('A', 'Y')])

    References
    ----------
    Causality: Models, Reasoning, and Inference, Judea Pearl (2000). p.70.
    """
    dag = self if inplace else self.copy()

    # Accept a single node or any iterable of nodes.
    nodes = [nodes] if isinstance(nodes, (str, int)) else list(nodes)

    if not set(nodes).issubset(set(self.nodes())):
        raise ValueError(
            f"Nodes not found in the model: {set(nodes) - set(self.nodes)}"
        )

    # Severing every incoming edge of an intervened node implements do().
    for node in nodes:
        for parent in list(dag.predecessors(node)):
            dag.remove_edge(parent, node)
    return dag
def get_ancestral_graph(self, nodes):
    """
    Returns the ancestral graph of the given `nodes`. The ancestral graph only
    contains the nodes which are ancestors of atleast one of the variables in
    node.

    Parameters
    ----------
    node: iterable
        List of nodes whose ancestral graph needs to be computed.

    Returns
    -------
    Ancestral Graph: pgmpy.base.DAG

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> dag = DAG([('A', 'C'), ('B', 'C'), ('D', 'A'), ('D', 'B')])
    >>> anc_dag = dag.get_ancestral_graph(nodes=['A', 'B'])
    >>> anc_dag.edges()
    OutEdgeView([('D', 'A'), ('D', 'B')])
    """
    # The ancestral graph is simply the subgraph induced by the ancestors
    # (which include the given nodes themselves).
    ancestors = self._get_ancestors_of(nodes=nodes)
    return self.subgraph(nodes=ancestors)
def to_daft(
    self,
    node_pos="circular",
    latex=True,
    pgm_params={},
    edge_params={},
    node_params={},
):
    """
    Returns a daft (https://docs.daft-pgm.org/en/latest/) object which can be rendered for
    publication quality plots. The returned object's render method can be called to see the plots.

    Parameters
    ----------
    node_pos: str or dict (default: circular)
        If str: Must be one of the following: circular, kamada_kawai, planar, random, shell, sprint,
        spectral, spiral. Please refer: https://networkx.org/documentation/stable//reference/drawing.html#module-networkx.drawing.layout for details on these layouts.

        If dict should be of the form {node: (x coordinate, y coordinate)} describing the x and y coordinate of each
        node.

        If no argument is provided uses circular layout.

    latex: boolean
        Whether to use latex for rendering the node names.

    pgm_params: dict (optional)
        Any additional parameters that need to be passed to `daft.PGM` initializer.
        Should be of the form: {param_name: param_value}

    edge_params: dict (optional)
        Any additional edge parameters that need to be passed to `daft.add_edge` method.
        Should be of the form: {(u1, v1): {param_name: param_value}, (u2, v2): {...} }

    node_params: dict (optional)
        Any additional node parameters that need to be passed to `daft.add_node` method.
        Should be of the form: {node1: {param_name: param_value}, node2: {...} }

    Returns
    -------
    Daft object: daft.PGM object
        Daft object for plotting the DAG.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> dag = DAG([('a', 'b'), ('b', 'c'), ('d', 'c')])
    >>> dag.to_daft(node_pos={'a': (0, 0), 'b': (1, 0), 'c': (2, 0), 'd': (1, 1)})
    <daft.PGM at 0x7fc756e936d0>
    >>> dag.to_daft(node_pos="circular")
    <daft.PGM at 0x7f9bb48c5eb0>
    >>> dag.to_daft(node_pos="circular", pgm_params={'observed_style': 'inner'})
    <daft.PGM at 0x7f9bb48b0bb0>
    >>> dag.to_daft(node_pos="circular",
    ...             edge_params={('a', 'b'): {'label': 2}},
    ...             node_params={'a': {'shape': 'rectangle'}})
    <daft.PGM at 0x7f9bb48b0bb0>
    """
    try:
        from daft import PGM
    except ImportError as e:
        # Chain the original error so the real import failure is visible.
        raise ImportError(
            "Package daft required. Please visit: https://docs.daft-pgm.org/en/latest/ for installation instructions."
        ) from e

    # Resolve node positions: either a named networkx layout or an
    # explicit {node: (x, y)} mapping covering every node.
    if isinstance(node_pos, str):
        supported_layouts = {
            "circular": nx.circular_layout,
            "kamada_kawai": nx.kamada_kawai_layout,
            "planar": nx.planar_layout,
            "random": nx.random_layout,
            "shell": nx.shell_layout,
            "spring": nx.spring_layout,
            "spectral": nx.spectral_layout,
            "spiral": nx.spiral_layout,
        }
        if node_pos not in supported_layouts.keys():
            raise ValueError(
                "Unknown node_pos argument. Please refer docstring for accepted values"
            )
        else:
            node_pos = supported_layouts[node_pos](self)
    elif isinstance(node_pos, dict):
        for node in self.nodes():
            if node not in node_pos.keys():
                raise ValueError(f"No position specified for {node}.")
    else:
        raise ValueError(
            "Argument node_pos not valid. Please refer to the docstring."
        )

    daft_pgm = PGM(**pgm_params)
    for node in self.nodes():
        extra_params = node_params.get(node, dict())
        # Only the node label differs between latex and plain rendering.
        label = rf"${node}$" if latex else f"{node}"
        daft_pgm.add_node(
            node,
            label,
            node_pos[node][0],
            node_pos[node][1],
            observed=True,
            **extra_params,
        )

    for u, v in self.edges():
        extra_params = edge_params.get((u, v), dict())
        daft_pgm.add_edge(u, v, **extra_params)

    return daft_pgm
@staticmethod
def get_random(n_nodes=5, edge_prob=0.5, node_names=None, latents=False, seed=None):
    """
    Returns a randomly generated DAG with `n_nodes` number of nodes with
    edge probability being `edge_prob`.

    Parameters
    ----------
    n_nodes: int
        The number of nodes in the randomly generated DAG.

    edge_prob: float
        The probability of edge between any two nodes in the topologically
        sorted DAG.

    node_names: list (default: None)
        A list of variables names to use in the random graph.
        If None, the node names are integer values starting from 0.

    latents: bool (default: False)
        If True, includes latent variables in the generated DAG.

    seed: int (default: None)
        The seed for the random number generator.

    Returns
    -------
    Random DAG: pgmpy.base.DAG
        The randomly generated DAG.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> random_dag = DAG.get_random(n_nodes=10, edge_prob=0.3)
    >>> random_dag.nodes()
    NodeView((0, 1, 2, 3, 4, 5, 6, 7, 8, 9))
    >>> random_dag.edges()
    OutEdgeView([(0, 6), (1, 6), (1, 7), (7, 9), (2, 5), (2, 7), (2, 8), (5, 9), (3, 7)])
    """
    # Step 1: Generate a matrix of 0 and 1. Prob of choosing 1 = edge_prob
    gen = np.random.default_rng(seed=seed)
    adj_mat = gen.choice(
        [0, 1], size=(n_nodes, n_nodes), p=[1 - edge_prob, edge_prob]
    )

    # Step 2: Use the upper triangular part of the matrix as adjacency.
    # The upper triangle of a random 0/1 matrix is always acyclic.
    if node_names is None:
        node_names = list(range(n_nodes))

    adj_pd = pd.DataFrame(
        np.triu(adj_mat, k=1), columns=node_names, index=node_names
    )
    nx_dag = nx.from_pandas_adjacency(adj_pd, create_using=nx.DiGraph)

    dag = DAG(nx_dag)
    # Ensure every requested node is present, even if it got no edges.
    dag.add_nodes_from(node_names)

    if latents:
        # `Generator.choice` requires a 1-D array-like; a networkx NodeView
        # is not one (np.asarray turns it into a 0-d object array), so
        # materialize the nodes as a list first. Sampling is with
        # replacement; the set() removes any duplicates.
        n_latents = gen.integers(low=0, high=len(dag.nodes()))
        dag.latents = set(gen.choice(list(dag.nodes()), n_latents))
    return dag
def to_graphviz(self):
    """
    Returns a pygraphviz object for the DAG. pygraphviz is useful for
    visualizing the network structure.

    Returns
    -------
    pygraphviz.AGraph
        The pygraphviz representation of this DAG.

    Examples
    --------
    >>> from pgmpy.utils import get_example_model
    >>> model = get_example_model('alarm')
    >>> model.to_graphviz()
    <AGraph <Swig Object of type 'Agraph_t *' at 0x7fdea4cde040>>
    >>> model.draw('model.png', prog='neato')
    """
    return nx.nx_agraph.to_agraph(self)
def fit(self, data, estimator=None, state_names=[], n_jobs=1, **kwargs):
|
| 1096 |
+
"""
|
| 1097 |
+
Estimates the CPD for each variable based on a given data set.
|
| 1098 |
+
|
| 1099 |
+
Parameters
|
| 1100 |
+
----------
|
| 1101 |
+
data: pandas DataFrame object
|
| 1102 |
+
DataFrame object with column names identical to the variable names of the network.
|
| 1103 |
+
(If some values in the data are missing the data cells should be set to `numpy.nan`.
|
| 1104 |
+
Note that pandas converts each column containing `numpy.nan`s to dtype `float`.)
|
| 1105 |
+
|
| 1106 |
+
estimator: Estimator class
|
| 1107 |
+
One of:
|
| 1108 |
+
- MaximumLikelihoodEstimator (default)
|
| 1109 |
+
- BayesianEstimator: In this case, pass 'prior_type' and either 'pseudo_counts'
|
| 1110 |
+
or 'equivalent_sample_size' as additional keyword arguments.
|
| 1111 |
+
See `BayesianEstimator.get_parameters()` for usage.
|
| 1112 |
+
- ExpectationMaximization
|
| 1113 |
+
|
| 1114 |
+
state_names: dict (optional)
|
| 1115 |
+
A dict indicating, for each variable, the discrete set of states
|
| 1116 |
+
that the variable can take. If unspecified, the observed values
|
| 1117 |
+
in the data set are taken to be the only possible states.
|
| 1118 |
+
|
| 1119 |
+
n_jobs: int (default: 1)
|
| 1120 |
+
Number of threads/processes to use for estimation. Using n_jobs > 1
|
| 1121 |
+
for small models or datasets might be slower.
|
| 1122 |
+
|
| 1123 |
+
Returns
|
| 1124 |
+
-------
|
| 1125 |
+
Fitted Model: BayesianNetwork
|
| 1126 |
+
Returns a BayesianNetwork object with learned CPDs.
|
| 1127 |
+
The DAG structure is preserved, and parameters (CPDs) are added.
|
| 1128 |
+
This allows the DAG to represent both the structure and the parameters of a Bayesian Network.
|
| 1129 |
+
|
| 1130 |
+
Examples
|
| 1131 |
+
--------
|
| 1132 |
+
>>> import pandas as pd
|
| 1133 |
+
>>> from pgmpy.models import BayesianNetwork
|
| 1134 |
+
>>> from pgmpy.base import DAG
|
| 1135 |
+
>>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
|
| 1136 |
+
>>> model = DAG([('A', 'C'), ('B', 'C')])
|
| 1137 |
+
>>> fitted_model = model.fit(data)
|
| 1138 |
+
>>> fitted_model.get_cpds()
|
| 1139 |
+
[<TabularCPD representing P(A:2) at 0x17945372c30>,
|
| 1140 |
+
<TabularCPD representing P(B:2) at 0x17945a19760>,
|
| 1141 |
+
<TabularCPD representing P(C:2 | A:2, B:2) at 0x17944f42690>]
|
| 1142 |
+
"""
|
| 1143 |
+
from pgmpy.estimators import BaseEstimator, MaximumLikelihoodEstimator
|
| 1144 |
+
from pgmpy.models import BayesianNetwork
|
| 1145 |
+
|
| 1146 |
+
if isinstance(self, BayesianNetwork):
|
| 1147 |
+
bn = self
|
| 1148 |
+
else:
|
| 1149 |
+
bn = BayesianNetwork(self.edges())
|
| 1150 |
+
|
| 1151 |
+
if estimator is None:
|
| 1152 |
+
estimator = MaximumLikelihoodEstimator
|
| 1153 |
+
else:
|
| 1154 |
+
if not issubclass(estimator, BaseEstimator):
|
| 1155 |
+
raise TypeError("Estimator object should be a valid pgmpy estimator.")
|
| 1156 |
+
|
| 1157 |
+
_estimator = estimator(
|
| 1158 |
+
bn,
|
| 1159 |
+
data,
|
| 1160 |
+
state_names=state_names,
|
| 1161 |
+
)
|
| 1162 |
+
cpds_list = _estimator.get_parameters(n_jobs=n_jobs, **kwargs)
|
| 1163 |
+
bn.add_cpds(*cpds_list)
|
| 1164 |
+
return bn
|
| 1165 |
+
|
| 1166 |
+
|
| 1167 |
+
class PDAG(nx.DiGraph):
|
| 1168 |
+
"""
|
| 1169 |
+
Class for representing PDAGs (also known as CPDAG). PDAGs are the equivalence classes of
|
| 1170 |
+
DAGs and contain both directed and undirected edges.
|
| 1171 |
+
|
| 1172 |
+
Note: In this class, undirected edges are represented using two edges in both direction i.e.
|
| 1173 |
+
an undirected edge between X - Y is represented using X -> Y and X <- Y.
|
| 1174 |
+
"""
|
| 1175 |
+
|
| 1176 |
+
def __init__(self, directed_ebunch=[], undirected_ebunch=[], latents=[]):
|
| 1177 |
+
"""
|
| 1178 |
+
Initializes a PDAG class.
|
| 1179 |
+
|
| 1180 |
+
Parameters
|
| 1181 |
+
----------
|
| 1182 |
+
directed_ebunch: list, array-like of 2-tuples
|
| 1183 |
+
List of directed edges in the PDAG.
|
| 1184 |
+
|
| 1185 |
+
undirected_ebunch: list, array-like of 2-tuples
|
| 1186 |
+
List of undirected edges in the PDAG.
|
| 1187 |
+
|
| 1188 |
+
latents: list, array-like
|
| 1189 |
+
List of nodes which are latent variables.
|
| 1190 |
+
|
| 1191 |
+
Returns
|
| 1192 |
+
-------
|
| 1193 |
+
An instance of the PDAG object.
|
| 1194 |
+
|
| 1195 |
+
Examples
|
| 1196 |
+
--------
|
| 1197 |
+
"""
|
| 1198 |
+
super(PDAG, self).__init__(
|
| 1199 |
+
directed_ebunch
|
| 1200 |
+
+ undirected_ebunch
|
| 1201 |
+
+ [(Y, X) for (X, Y) in undirected_ebunch]
|
| 1202 |
+
)
|
| 1203 |
+
self.latents = set(latents)
|
| 1204 |
+
self.directed_edges = set(directed_ebunch)
|
| 1205 |
+
self.undirected_edges = set(undirected_ebunch)
|
| 1206 |
+
# TODO: Fix the cycle issue
|
| 1207 |
+
# import pdb; pdb.set_trace()
|
| 1208 |
+
# try:
|
| 1209 |
+
# # Filter out undirected edges as they also form a cycle in
|
| 1210 |
+
# # themself when represented using directed edges.
|
| 1211 |
+
# cycles = filter(lambda t: len(t) > 2, nx.simple_cycles(self))
|
| 1212 |
+
# if cycles:
|
| 1213 |
+
# out_str = "Cycles are not allowed in a PDAG. "
|
| 1214 |
+
# out_str += "The following path forms a loop: "
|
| 1215 |
+
# out_str += "".join(["({u},{v}) ".format(u=u, v=v) for (u, v) in cycles])
|
| 1216 |
+
# raise ValueError(out_str)
|
| 1217 |
+
# except nx.NetworkXNoCycle:
|
| 1218 |
+
# pass
|
| 1219 |
+
|
| 1220 |
+
def copy(self):
|
| 1221 |
+
"""
|
| 1222 |
+
Returns a copy of the object instance.
|
| 1223 |
+
|
| 1224 |
+
Returns
|
| 1225 |
+
-------
|
| 1226 |
+
Copy of PDAG: pgmpy.dag.PDAG
|
| 1227 |
+
Returns a copy of self.
|
| 1228 |
+
"""
|
| 1229 |
+
return PDAG(
|
| 1230 |
+
directed_ebunch=list(self.directed_edges.copy()),
|
| 1231 |
+
undirected_ebunch=list(self.undirected_edges.copy()),
|
| 1232 |
+
latents=self.latents,
|
| 1233 |
+
)
|
| 1234 |
+
|
| 1235 |
+
def to_dag(self, required_edges=[]):
|
| 1236 |
+
"""
|
| 1237 |
+
Returns one possible DAG which is represented using the PDAG.
|
| 1238 |
+
|
| 1239 |
+
Parameters
|
| 1240 |
+
----------
|
| 1241 |
+
required_edges: list, array-like of 2-tuples
|
| 1242 |
+
The list of edges that should be included in the DAG.
|
| 1243 |
+
|
| 1244 |
+
Returns
|
| 1245 |
+
-------
|
| 1246 |
+
Returns an instance of DAG.
|
| 1247 |
+
|
| 1248 |
+
Examples
|
| 1249 |
+
--------
|
| 1250 |
+
|
| 1251 |
+
"""
|
| 1252 |
+
# Add required edges if it doesn't form a new v-structure or an opposite edge
|
| 1253 |
+
# is already present in the network.
|
| 1254 |
+
dag = DAG()
|
| 1255 |
+
# Add all the nodes and the directed edges
|
| 1256 |
+
dag.add_nodes_from(self.nodes())
|
| 1257 |
+
dag.add_edges_from(self.directed_edges)
|
| 1258 |
+
dag.latents = self.latents
|
| 1259 |
+
|
| 1260 |
+
pdag = self.copy()
|
| 1261 |
+
while pdag.number_of_nodes() > 0:
|
| 1262 |
+
# find node with (1) no directed outgoing edges and
|
| 1263 |
+
# (2) the set of undirected neighbors is either empty or
|
| 1264 |
+
# undirected neighbors + parents of X are a clique
|
| 1265 |
+
found = False
|
| 1266 |
+
for X in pdag.nodes():
|
| 1267 |
+
directed_outgoing_edges = set(pdag.successors(X)) - set(
|
| 1268 |
+
pdag.predecessors(X)
|
| 1269 |
+
)
|
| 1270 |
+
undirected_neighbors = set(pdag.successors(X)) & set(
|
| 1271 |
+
pdag.predecessors(X)
|
| 1272 |
+
)
|
| 1273 |
+
neighbors_are_clique = all(
|
| 1274 |
+
(
|
| 1275 |
+
pdag.has_edge(Y, Z)
|
| 1276 |
+
for Z in pdag.predecessors(X)
|
| 1277 |
+
for Y in undirected_neighbors
|
| 1278 |
+
if not Y == Z
|
| 1279 |
+
)
|
| 1280 |
+
)
|
| 1281 |
+
|
| 1282 |
+
if not directed_outgoing_edges and (
|
| 1283 |
+
not undirected_neighbors or neighbors_are_clique
|
| 1284 |
+
):
|
| 1285 |
+
found = True
|
| 1286 |
+
# add all edges of X as outgoing edges to dag
|
| 1287 |
+
for Y in pdag.predecessors(X):
|
| 1288 |
+
dag.add_edge(Y, X)
|
| 1289 |
+
pdag.remove_node(X)
|
| 1290 |
+
break
|
| 1291 |
+
|
| 1292 |
+
if not found:
|
| 1293 |
+
logger.warning(
|
| 1294 |
+
"PDAG has no faithful extension (= no oriented DAG with the "
|
| 1295 |
+
+ "same v-structures as PDAG). Remaining undirected PDAG edges "
|
| 1296 |
+
+ "oriented arbitrarily."
|
| 1297 |
+
)
|
| 1298 |
+
for X, Y in pdag.edges():
|
| 1299 |
+
if not dag.has_edge(Y, X):
|
| 1300 |
+
try:
|
| 1301 |
+
dag.add_edge(X, Y)
|
| 1302 |
+
except ValueError:
|
| 1303 |
+
pass
|
| 1304 |
+
break
|
| 1305 |
+
return dag
|
| 1306 |
+
|
| 1307 |
+
def to_graphviz(self):
|
| 1308 |
+
"""
|
| 1309 |
+
Retuns a pygraphviz object for the DAG. pygraphviz is useful for
|
| 1310 |
+
visualizing the network structure.
|
| 1311 |
+
|
| 1312 |
+
Examples
|
| 1313 |
+
--------
|
| 1314 |
+
>>> from pgmpy.utils import get_example_model
|
| 1315 |
+
>>> model = get_example_model('alarm')
|
| 1316 |
+
>>> model.to_graphviz()
|
| 1317 |
+
<AGraph <Swig Object of type 'Agraph_t *' at 0x7fdea4cde040>>
|
| 1318 |
+
"""
|
| 1319 |
+
return nx.nx_agraph.to_agraph(self)
|
testbed/pgmpy__pgmpy/pgmpy/base/UndirectedGraph.py
ADDED
|
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
|
| 3 |
+
import itertools
|
| 4 |
+
|
| 5 |
+
import networkx as nx
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class UndirectedGraph(nx.Graph):
|
| 9 |
+
"""
|
| 10 |
+
Base class for all the Undirected Graphical models.
|
| 11 |
+
|
| 12 |
+
Each node in the graph can represent either a random variable, `Factor`,
|
| 13 |
+
or a cluster of random variables. Edges in the graph are interactions
|
| 14 |
+
between the nodes.
|
| 15 |
+
|
| 16 |
+
Parameters
|
| 17 |
+
----------
|
| 18 |
+
data: input graph
|
| 19 |
+
Data to initialize graph. If data=None (default) an empty graph is
|
| 20 |
+
created. The data can be an edge list or any Networkx graph object.
|
| 21 |
+
|
| 22 |
+
Examples
|
| 23 |
+
--------
|
| 24 |
+
Create an empty UndirectedGraph with no nodes and no edges
|
| 25 |
+
|
| 26 |
+
>>> from pgmpy.base import UndirectedGraph
|
| 27 |
+
>>> G = UndirectedGraph()
|
| 28 |
+
|
| 29 |
+
G can be grown in several ways
|
| 30 |
+
|
| 31 |
+
**Nodes:**
|
| 32 |
+
|
| 33 |
+
Add one node at a time:
|
| 34 |
+
|
| 35 |
+
>>> G.add_node('a')
|
| 36 |
+
|
| 37 |
+
Add the nodes from any container (a list, set or tuple or the nodes
|
| 38 |
+
from another graph).
|
| 39 |
+
|
| 40 |
+
>>> G.add_nodes_from(['a', 'b'])
|
| 41 |
+
|
| 42 |
+
**Edges:**
|
| 43 |
+
|
| 44 |
+
G can also be grown by adding edges.
|
| 45 |
+
|
| 46 |
+
Add one edge,
|
| 47 |
+
|
| 48 |
+
>>> G.add_edge('a', 'b')
|
| 49 |
+
|
| 50 |
+
a list of edges,
|
| 51 |
+
|
| 52 |
+
>>> G.add_edges_from([('a', 'b'), ('b', 'c')])
|
| 53 |
+
|
| 54 |
+
If some edges connect nodes not yet in the model, the nodes
|
| 55 |
+
are added automatically. There are no errors when adding
|
| 56 |
+
nodes or edges that already exist.
|
| 57 |
+
|
| 58 |
+
**Shortcuts:**
|
| 59 |
+
|
| 60 |
+
Many common graph features allow python syntax for speed reporting.
|
| 61 |
+
|
| 62 |
+
>>> 'a' in G # check if node in graph
|
| 63 |
+
True
|
| 64 |
+
>>> len(G) # number of nodes in graph
|
| 65 |
+
3
|
| 66 |
+
"""
|
| 67 |
+
|
| 68 |
+
def __init__(self, ebunch=None):
|
| 69 |
+
super(UndirectedGraph, self).__init__(ebunch)
|
| 70 |
+
|
| 71 |
+
def add_node(self, node, weight=None):
|
| 72 |
+
"""
|
| 73 |
+
Add a single node to the Graph.
|
| 74 |
+
|
| 75 |
+
Parameters
|
| 76 |
+
----------
|
| 77 |
+
node: str, int, or any hashable python object.
|
| 78 |
+
The node to add to the graph.
|
| 79 |
+
|
| 80 |
+
weight: int, float
|
| 81 |
+
The weight of the node.
|
| 82 |
+
|
| 83 |
+
Examples
|
| 84 |
+
--------
|
| 85 |
+
>>> from pgmpy.base import UndirectedGraph
|
| 86 |
+
>>> G = UndirectedGraph()
|
| 87 |
+
>>> G.add_node(node='A')
|
| 88 |
+
>>> G.nodes()
|
| 89 |
+
NodeView(('A',))
|
| 90 |
+
|
| 91 |
+
Adding a node with some weight.
|
| 92 |
+
>>> G.add_node(node='B', weight=0.3)
|
| 93 |
+
|
| 94 |
+
The weight of these nodes can be accessed as:
|
| 95 |
+
>>> G.nodes['B']
|
| 96 |
+
{'weight': 0.3}
|
| 97 |
+
>>> G.nodes['A']
|
| 98 |
+
{'weight': None}
|
| 99 |
+
"""
|
| 100 |
+
# Check for networkx 2.0 syntax
|
| 101 |
+
if isinstance(node, tuple) and len(node) == 2 and isinstance(node[1], dict):
|
| 102 |
+
node, attrs = node
|
| 103 |
+
if attrs.get("weight", None) is not None:
|
| 104 |
+
attrs["weight"] = weight
|
| 105 |
+
else:
|
| 106 |
+
attrs = {"weight": weight}
|
| 107 |
+
super(UndirectedGraph, self).add_node(node, weight=weight)
|
| 108 |
+
|
| 109 |
+
def add_nodes_from(self, nodes, weights=None):
|
| 110 |
+
"""
|
| 111 |
+
Add multiple nodes to the Graph.
|
| 112 |
+
|
| 113 |
+
**The behaviour of adding weights is different than in networkx.
|
| 114 |
+
|
| 115 |
+
Parameters
|
| 116 |
+
----------
|
| 117 |
+
nodes: iterable container
|
| 118 |
+
A container of nodes (list, dict, set, or any hashable python
|
| 119 |
+
object).
|
| 120 |
+
|
| 121 |
+
weights: list, tuple (default=None)
|
| 122 |
+
A container of weights (int, float). The weight value at index i
|
| 123 |
+
is associated with the variable at index i.
|
| 124 |
+
|
| 125 |
+
Examples
|
| 126 |
+
--------
|
| 127 |
+
>>> from pgmpy.base import UndirectedGraph
|
| 128 |
+
>>> G = UndirectedGraph()
|
| 129 |
+
>>> G.add_nodes_from(nodes=['A', 'B', 'C'])
|
| 130 |
+
>>> G.nodes()
|
| 131 |
+
NodeView(('A', 'B', 'C'))
|
| 132 |
+
|
| 133 |
+
Adding nodes with weights:
|
| 134 |
+
>>> G.add_nodes_from(nodes=['D', 'E'], weights=[0.3, 0.6])
|
| 135 |
+
>>> G.nodes['D']
|
| 136 |
+
{'weight': 0.3}
|
| 137 |
+
>>> G.nodes['E']
|
| 138 |
+
{'weight': 0.6}
|
| 139 |
+
>>> G.nodes['A']
|
| 140 |
+
{'weight': None}
|
| 141 |
+
"""
|
| 142 |
+
nodes = list(nodes)
|
| 143 |
+
|
| 144 |
+
if weights:
|
| 145 |
+
if len(nodes) != len(weights):
|
| 146 |
+
raise ValueError(
|
| 147 |
+
"The number of elements in nodes and weights" "should be equal."
|
| 148 |
+
)
|
| 149 |
+
for index in range(len(nodes)):
|
| 150 |
+
self.add_node(node=nodes[index], weight=weights[index])
|
| 151 |
+
else:
|
| 152 |
+
for node in nodes:
|
| 153 |
+
self.add_node(node=node)
|
| 154 |
+
|
| 155 |
+
def add_edge(self, u, v, weight=None):
|
| 156 |
+
"""
|
| 157 |
+
Add an edge between u and v.
|
| 158 |
+
|
| 159 |
+
The nodes u and v will be automatically added if they are
|
| 160 |
+
not already in the graph.
|
| 161 |
+
|
| 162 |
+
Parameters
|
| 163 |
+
----------
|
| 164 |
+
u, v : nodes
|
| 165 |
+
Nodes can be any hashable Python object.
|
| 166 |
+
|
| 167 |
+
weight: int, float (default=None)
|
| 168 |
+
The weight of the edge.
|
| 169 |
+
|
| 170 |
+
Examples
|
| 171 |
+
--------
|
| 172 |
+
>>> from pgmpy.base import UndirectedGraph
|
| 173 |
+
>>> G = UndirectedGraph()
|
| 174 |
+
>>> G.add_nodes_from(nodes=['Alice', 'Bob', 'Charles'])
|
| 175 |
+
>>> G.add_edge(u='Alice', v='Bob')
|
| 176 |
+
>>> G.nodes()
|
| 177 |
+
NodeView(('Alice', 'Bob', 'Charles'))
|
| 178 |
+
>>> G.edges()
|
| 179 |
+
EdgeView([('Alice', 'Bob')])
|
| 180 |
+
|
| 181 |
+
When the node is not already present in the graph:
|
| 182 |
+
>>> G.add_edge(u='Alice', v='Ankur')
|
| 183 |
+
>>> G.nodes()
|
| 184 |
+
NodeView('Alice', 'Ankur', 'Bob', 'Charles'))
|
| 185 |
+
>>> G.edges()
|
| 186 |
+
EdgeView([('Alice', 'Bob'), ('Alice', 'Ankur')])
|
| 187 |
+
|
| 188 |
+
Adding edges with weight:
|
| 189 |
+
>>> G.add_edge('Ankur', 'Maria', weight=0.1)
|
| 190 |
+
>>> G.edge['Ankur']['Maria']
|
| 191 |
+
{'weight': 0.1}
|
| 192 |
+
"""
|
| 193 |
+
super(UndirectedGraph, self).add_edge(u, v, weight=weight)
|
| 194 |
+
|
| 195 |
+
def add_edges_from(self, ebunch, weights=None):
|
| 196 |
+
"""
|
| 197 |
+
Add all the edges in ebunch.
|
| 198 |
+
|
| 199 |
+
If nodes referred in the ebunch are not already present, they
|
| 200 |
+
will be automatically added. Node names can be any hashable python
|
| 201 |
+
object.
|
| 202 |
+
|
| 203 |
+
**The behavior of adding weights is different than networkx.
|
| 204 |
+
|
| 205 |
+
Parameters
|
| 206 |
+
----------
|
| 207 |
+
ebunch : container of edges
|
| 208 |
+
Each edge given in the container will be added to the graph.
|
| 209 |
+
The edges must be given as 2-tuples (u, v).
|
| 210 |
+
|
| 211 |
+
weights: list, tuple (default=None)
|
| 212 |
+
A container of weights (int, float). The weight value at index i
|
| 213 |
+
is associated with the edge at index i.
|
| 214 |
+
|
| 215 |
+
Examples
|
| 216 |
+
--------
|
| 217 |
+
>>> from pgmpy.base import UndirectedGraph
|
| 218 |
+
>>> G = UndirectedGraph()
|
| 219 |
+
>>> G.add_nodes_from(nodes=['Alice', 'Bob', 'Charles'])
|
| 220 |
+
>>> G.add_edges_from(ebunch=[('Alice', 'Bob'), ('Bob', 'Charles')])
|
| 221 |
+
>>> G.nodes()
|
| 222 |
+
NodeView(('Alice', 'Bob', 'Charles'))
|
| 223 |
+
>>> G.edges()
|
| 224 |
+
EdgeView([('Alice', 'Bob'), ('Bob', 'Charles')])
|
| 225 |
+
|
| 226 |
+
When the node is not already in the model:
|
| 227 |
+
>>> G.add_edges_from(ebunch=[('Alice', 'Ankur')])
|
| 228 |
+
>>> G.nodes()
|
| 229 |
+
NodeView(('Alice', 'Ankur', 'Charles', 'Bob'))
|
| 230 |
+
>>> G.edges()
|
| 231 |
+
EdgeView([('Alice', 'Bob'), ('Bob', 'Charles'), ('Alice', 'Ankur')])
|
| 232 |
+
|
| 233 |
+
Adding edges with weights:
|
| 234 |
+
>>> G.add_edges_from([('Ankur', 'Maria'), ('Maria', 'Mason')],
|
| 235 |
+
... weights=[0.3, 0.5])
|
| 236 |
+
>>> G.edge['Ankur']['Maria']
|
| 237 |
+
{'weight': 0.3}
|
| 238 |
+
>>> G.edge['Maria']['Mason']
|
| 239 |
+
{'weight': 0.5}
|
| 240 |
+
"""
|
| 241 |
+
ebunch = list(ebunch)
|
| 242 |
+
|
| 243 |
+
if weights:
|
| 244 |
+
if len(ebunch) != len(weights):
|
| 245 |
+
raise ValueError(
|
| 246 |
+
"The number of elements in ebunch and weights" "should be equal"
|
| 247 |
+
)
|
| 248 |
+
for index in range(len(ebunch)):
|
| 249 |
+
self.add_edge(ebunch[index][0], ebunch[index][1], weight=weights[index])
|
| 250 |
+
else:
|
| 251 |
+
for edge in ebunch:
|
| 252 |
+
self.add_edge(edge[0], edge[1])
|
| 253 |
+
|
| 254 |
+
def is_clique(self, nodes):
|
| 255 |
+
"""
|
| 256 |
+
Check if the given nodes form a clique.
|
| 257 |
+
|
| 258 |
+
Parameters
|
| 259 |
+
----------
|
| 260 |
+
nodes: list, array-like
|
| 261 |
+
List of nodes to check if they are a part of any clique.
|
| 262 |
+
|
| 263 |
+
Examples
|
| 264 |
+
--------
|
| 265 |
+
>>> from pgmpy.base import UndirectedGraph
|
| 266 |
+
>>> G = UndirectedGraph(ebunch=[('A', 'B'), ('C', 'B'), ('B', 'D'),
|
| 267 |
+
('B', 'E'), ('D', 'E'), ('E', 'F'),
|
| 268 |
+
('D', 'F'), ('B', 'F')])
|
| 269 |
+
>>> G.is_clique(nodes=['A', 'B', 'C', 'D'])
|
| 270 |
+
False
|
| 271 |
+
>>> G.is_clique(nodes=['B', 'D', 'E', 'F'])
|
| 272 |
+
True
|
| 273 |
+
|
| 274 |
+
Since B, D, E and F are clique, any subset of these should also
|
| 275 |
+
be clique.
|
| 276 |
+
>>> G.is_clique(nodes=['D', 'E', 'B'])
|
| 277 |
+
True
|
| 278 |
+
"""
|
| 279 |
+
for node1, node2 in itertools.combinations(nodes, 2):
|
| 280 |
+
if not self.has_edge(node1, node2):
|
| 281 |
+
return False
|
| 282 |
+
return True
|
| 283 |
+
|
| 284 |
+
def is_triangulated(self):
|
| 285 |
+
"""
|
| 286 |
+
Checks whether the undirected graph is triangulated (also known
|
| 287 |
+
as chordal) or not.
|
| 288 |
+
|
| 289 |
+
Chordal Graph: A chordal graph is one in which all cycles of four
|
| 290 |
+
or more vertices have a chord.
|
| 291 |
+
|
| 292 |
+
Examples
|
| 293 |
+
--------
|
| 294 |
+
>>> from pgmpy.base import UndirectedGraph
|
| 295 |
+
>>> G = UndirectedGraph()
|
| 296 |
+
>>> G.add_edges_from(ebunch=[('x1', 'x2'), ('x1', 'x3'),
|
| 297 |
+
... ('x2', 'x4'), ('x3', 'x4')])
|
| 298 |
+
>>> G.is_triangulated()
|
| 299 |
+
False
|
| 300 |
+
>>> G.add_edge(u='x1', v='x4')
|
| 301 |
+
>>> G.is_triangulated()
|
| 302 |
+
True
|
| 303 |
+
"""
|
| 304 |
+
return nx.is_chordal(self)
|
testbed/pgmpy__pgmpy/pgmpy/base/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .UndirectedGraph import UndirectedGraph
|
| 2 |
+
from .DAG import DAG, PDAG
|
| 3 |
+
|
| 4 |
+
__all__ = ["UndirectedGraph", "DAG", "PDAG"]
|
testbed/pgmpy__pgmpy/pgmpy/estimators/EM.py
ADDED
|
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from itertools import chain, product
|
| 2 |
+
from math import log
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pandas as pd
|
| 6 |
+
from joblib import Parallel, delayed
|
| 7 |
+
from tqdm.auto import tqdm
|
| 8 |
+
|
| 9 |
+
from pgmpy import config
|
| 10 |
+
from pgmpy.estimators import MaximumLikelihoodEstimator, ParameterEstimator
|
| 11 |
+
from pgmpy.factors.discrete import TabularCPD
|
| 12 |
+
from pgmpy.models import BayesianNetwork
|
| 13 |
+
from pgmpy.base import DAG
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class ExpectationMaximization(ParameterEstimator):
|
| 17 |
+
"""
|
| 18 |
+
Class used to compute parameters for a model using Expectation
|
| 19 |
+
Maximization (EM).
|
| 20 |
+
|
| 21 |
+
EM is an iterative algorithm commonly used for
|
| 22 |
+
estimation in the case when there are latent variables in the model.
|
| 23 |
+
The algorithm iteratively improves the parameter estimates maximizing
|
| 24 |
+
the likelihood of the given data.
|
| 25 |
+
|
| 26 |
+
Parameters
|
| 27 |
+
----------
|
| 28 |
+
model: A pgmpy.models.BayesianNetwork instance
|
| 29 |
+
|
| 30 |
+
data: pandas DataFrame object
|
| 31 |
+
DataFrame object with column names identical to the variable names
|
| 32 |
+
of the network. (If some values in the data are missing the data
|
| 33 |
+
cells should be set to `numpy.nan`. Note that pandas converts each
|
| 34 |
+
column containing `numpy.nan`s to dtype `float`.)
|
| 35 |
+
|
| 36 |
+
state_names: dict (optional)
|
| 37 |
+
A dict indicating, for each variable, the discrete set of states
|
| 38 |
+
that the variable can take. If unspecified, the observed values in
|
| 39 |
+
the data set are taken to be the only possible states.
|
| 40 |
+
|
| 41 |
+
Examples
|
| 42 |
+
--------
|
| 43 |
+
>>> import numpy as np
|
| 44 |
+
>>> import pandas as pd
|
| 45 |
+
>>> from pgmpy.models import BayesianNetwork
|
| 46 |
+
>>> from pgmpy.estimators import ExpectationMaximization
|
| 47 |
+
>>> data = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
|
| 48 |
+
... columns=['A', 'B', 'C', 'D', 'E'])
|
| 49 |
+
>>> model = BayesianNetwork([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
|
| 50 |
+
>>> estimator = ExpectationMaximization(model, data)
|
| 51 |
+
"""
|
| 52 |
+
|
| 53 |
+
def __init__(self, model, data, **kwargs):
|
| 54 |
+
if not isinstance(model, (DAG, BayesianNetwork)):
|
| 55 |
+
raise NotImplementedError(
|
| 56 |
+
"Expectation Maximization is only implemented for DAG or BayesianNetwork"
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
if isinstance(model, DAG):
|
| 60 |
+
model_bn = BayesianNetwork(model.edges())
|
| 61 |
+
model_bn.add_nodes_from(model.nodes())
|
| 62 |
+
model_bn.latents = model.latents
|
| 63 |
+
model = model_bn
|
| 64 |
+
|
| 65 |
+
super(ExpectationMaximization, self).__init__(model, data, **kwargs)
|
| 66 |
+
self.model_copy = self.model.copy()
|
| 67 |
+
|
| 68 |
+
def _get_log_likelihood(self, datapoint):
|
| 69 |
+
"""
|
| 70 |
+
Computes the likelihood of a given datapoint. Goes through each
|
| 71 |
+
CPD matching the combination of states to get the value and multiplies
|
| 72 |
+
them together.
|
| 73 |
+
"""
|
| 74 |
+
likelihood = 0
|
| 75 |
+
for cpd in self.model_copy.cpds:
|
| 76 |
+
scope = set(cpd.scope())
|
| 77 |
+
likelihood += log(
|
| 78 |
+
max(
|
| 79 |
+
cpd.get_value(
|
| 80 |
+
**{
|
| 81 |
+
key: value
|
| 82 |
+
for key, value in datapoint.items()
|
| 83 |
+
if key in scope
|
| 84 |
+
}
|
| 85 |
+
),
|
| 86 |
+
1e-10,
|
| 87 |
+
)
|
| 88 |
+
)
|
| 89 |
+
return likelihood
|
| 90 |
+
|
| 91 |
+
def _parallel_compute_weights(
|
| 92 |
+
self, data_unique, latent_card, n_counts, offset, batch_size
|
| 93 |
+
):
|
| 94 |
+
cache = []
|
| 95 |
+
|
| 96 |
+
for i in range(offset, min(offset + batch_size, data_unique.shape[0])):
|
| 97 |
+
v = list(product(*[range(card) for card in latent_card.values()]))
|
| 98 |
+
latent_combinations = np.array(v, dtype=int)
|
| 99 |
+
df = data_unique.iloc[[i] * latent_combinations.shape[0]].reset_index(
|
| 100 |
+
drop=True
|
| 101 |
+
)
|
| 102 |
+
for index, latent_var in enumerate(latent_card.keys()):
|
| 103 |
+
df[latent_var] = latent_combinations[:, index]
|
| 104 |
+
weights = np.e ** (
|
| 105 |
+
df.apply(lambda t: self._get_log_likelihood(dict(t)), axis=1)
|
| 106 |
+
)
|
| 107 |
+
df["_weight"] = (weights / weights.sum()) * n_counts[
|
| 108 |
+
tuple(data_unique.iloc[i])
|
| 109 |
+
]
|
| 110 |
+
cache.append(df)
|
| 111 |
+
|
| 112 |
+
return pd.concat(cache, copy=False)
|
| 113 |
+
|
| 114 |
+
def _compute_weights(self, n_jobs, latent_card, batch_size):
|
| 115 |
+
"""
|
| 116 |
+
For each data point, creates extra data points for each possible combination
|
| 117 |
+
of states of latent variables and assigns weights to each of them.
|
| 118 |
+
"""
|
| 119 |
+
|
| 120 |
+
data_unique = self.data.drop_duplicates()
|
| 121 |
+
n_counts = (
|
| 122 |
+
self.data.groupby(list(self.data.columns), observed=True).size().to_dict()
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
cache = Parallel(n_jobs=n_jobs)(
|
| 126 |
+
delayed(self._parallel_compute_weights)(
|
| 127 |
+
data_unique, latent_card, n_counts, i, batch_size
|
| 128 |
+
)
|
| 129 |
+
for i in range(0, data_unique.shape[0], batch_size)
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
return pd.concat(cache, copy=False)
|
| 133 |
+
|
| 134 |
+
def _is_converged(self, new_cpds, atol=1e-08):
|
| 135 |
+
"""
|
| 136 |
+
Checks if the values of `new_cpds` is within tolerance limits of current
|
| 137 |
+
model cpds.
|
| 138 |
+
"""
|
| 139 |
+
for cpd in new_cpds:
|
| 140 |
+
if not cpd.__eq__(self.model_copy.get_cpds(node=cpd.scope()[0]), atol=atol):
|
| 141 |
+
return False
|
| 142 |
+
return True
|
| 143 |
+
|
| 144 |
+
def get_parameters(
    self,
    latent_card=None,
    max_iter=100,
    atol=1e-08,
    n_jobs=1,
    batch_size=1000,
    seed=None,
    init_cpds=None,
    show_progress=True,
):
    """
    Method to estimate all model parameters (CPDs) using Expectation Maximization.

    Parameters
    ----------
    latent_card: dict (default: None)
        A dictionary of the form {latent_var: cardinality} specifying the
        cardinality (number of states) of each latent variable. If None,
        assumes `2` states for each latent variable.

    max_iter: int (default: 100)
        The maximum number of iterations the algorithm is allowed to run for.
        If max_iter is reached, return the last value of parameters.

    atol: int (default: 1e-08)
        The absolute accepted tolerance for checking convergence. If the parameters
        change is less than atol in an iteration, the algorithm will exit.

    n_jobs: int (default: 1)
        Number of jobs to run in parallel.
        Using n_jobs > 1 for small models or datasets might be slower.

    batch_size: int (default: 1000)
        Number of data used to compute weights in a batch.

    seed: int
        The random seed to use for generating the initial values.

    init_cpds: dict (default: None)
        A dictionary of the form {variable: instance of TabularCPD}
        specifying the initial CPD values for the EM optimizer to start
        with. If not specified, CPDs involving latent variables are
        initialized randomly, and CPDs involving only observed variables are
        initialized with their MLE estimates.

    show_progress: boolean (default: True)
        Whether to show a progress bar for iterations.

    Returns
    -------
    Estimated parameters (CPDs): list
        A list of estimated CPDs for the model.

    Examples
    --------
    >>> import numpy as np
    >>> import pandas as pd
    >>> from pgmpy.models import BayesianNetwork
    >>> from pgmpy.estimators import ExpectationMaximization as EM
    >>> data = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 3)),
    ...                     columns=['A', 'C', 'D'])
    >>> model = BayesianNetwork([('A', 'B'), ('C', 'B'), ('C', 'D')], latents={'B'})
    >>> estimator = EM(model, data)
    >>> estimator.get_parameters(latent_card={'B': 3})
    [<TabularCPD representing P(C:2) at 0x7f7b534251d0>,
    <TabularCPD representing P(B:3 | C:2, A:2) at 0x7f7b4dfd4da0>,
    <TabularCPD representing P(A:2) at 0x7f7b4dfd4fd0>,
    <TabularCPD representing P(D:2 | C:2) at 0x7f7b4df822b0>]
    """
    # Bug fix: `init_cpds={}` was a mutable default argument, shared across
    # all calls of the method. Use None as the sentinel instead.
    if init_cpds is None:
        init_cpds = {}

    # Step 1: Parameter checks
    if latent_card is None:
        latent_card = {var: 2 for var in self.model_copy.latents}

    # Step 2: Create structures/variables to be used later.
    n_states_dict = {key: len(value) for key, value in self.state_names.items()}
    n_states_dict.update(latent_card)
    for var in self.model_copy.latents:
        self.state_names[var] = list(range(n_states_dict[var]))

    # Step 3: Initialize CPDs.
    # Step 3.1: Learn the CPDs of variables which don't involve
    #           latent variables using MLE if their init_cpd is
    #           not specified.
    fixed_cpds = []
    fixed_cpd_vars = (
        set(self.model.nodes())
        - self.model.latents
        - set(chain(*[self.model.get_children(var) for var in self.model.latents]))
        - set(init_cpds.keys())
    )

    # Build a bare MLE estimator via __new__ to skip its __init__ (which
    # would redo data/state-name processing already done here).
    mle = MaximumLikelihoodEstimator.__new__(MaximumLikelihoodEstimator)
    mle.model = self.model
    mle.data = self.data
    mle.state_names = self.state_names

    for var in fixed_cpd_vars:
        fixed_cpds.append(mle.estimate_cpd(var))

    # Step 3.2: Randomly initialize the CPDs involving latent variables if init_cpds is not specified.
    latent_cpds = []
    vars_with_latents = (
        set(self.model_copy.nodes()) - fixed_cpd_vars - set(init_cpds.keys())
    )
    for node in vars_with_latents:
        parents = list(self.model_copy.predecessors(node))
        latent_cpds.append(
            TabularCPD.get_random(
                variable=node,
                evidence=parents,
                cardinality={
                    var: n_states_dict[var] for var in chain([node], parents)
                },
                state_names={
                    var: self.state_names[var] for var in chain([node], parents)
                },
                seed=seed,
            )
        )

    self.model_copy.add_cpds(
        *list(chain(fixed_cpds, latent_cpds, list(init_cpds.values())))
    )

    if show_progress and config.SHOW_PROGRESS:
        pbar = tqdm(total=max_iter)

    mle.model = self.model_copy

    # Bug fix: previously `new_cpds` was unbound if max_iter == 0, making the
    # final `return new_cpds` raise NameError. Fall back to the MLE-estimated
    # fixed CPDs in that degenerate case.
    new_cpds = fixed_cpds.copy()

    # Step 4: Run the EM algorithm.
    for _ in range(max_iter):
        # Step 4.1: E-step: Expands the dataset and computes the likelihood of each
        #           possible state of latent variables.
        weighted_data = self._compute_weights(n_jobs, latent_card, batch_size)
        # Step 4.2: M-step: Uses the weights of the dataset to do a weighted MLE.
        new_cpds = fixed_cpds.copy()
        mle.data = weighted_data
        for var in vars_with_latents.union(set(init_cpds.keys())):
            new_cpds.append(mle.estimate_cpd(var, weighted=True))

        # Step 4.3: Check of convergence and max_iter
        if self._is_converged(new_cpds, atol=atol):
            if show_progress and config.SHOW_PROGRESS:
                pbar.close()
            return new_cpds

        else:
            self.model_copy.cpds = new_cpds
            if show_progress and config.SHOW_PROGRESS:
                pbar.update(1)

    # Bug fix: close the progress bar when the loop exhausts max_iter too
    # (it was only closed on the convergence path).
    if show_progress and config.SHOW_PROGRESS:
        pbar.close()
    return new_cpds
|
testbed/pgmpy__pgmpy/pgmpy/estimators/ExhaustiveSearch.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
from itertools import combinations
|
| 4 |
+
|
| 5 |
+
import networkx as nx
|
| 6 |
+
|
| 7 |
+
from pgmpy.base import DAG
|
| 8 |
+
from pgmpy.estimators import K2, ScoreCache, StructureEstimator
|
| 9 |
+
from pgmpy.global_vars import logger
|
| 10 |
+
from pgmpy.utils.mathext import powerset
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class ExhaustiveSearch(StructureEstimator):
    """
    Exhaustive structure search over every possible DAG on a given variable set.

    A `StructureScore` instance drives the search; `estimate` returns the
    model with maximal score. Since all DAGs are enumerated, the search is
    only practical for a handful of variables.

    Parameters
    ----------
    data: pandas DataFrame object
        dataframe object where each column represents one variable.
        (If some values in the data are missing the data cells should be set
        to `numpy.NaN`. Note that pandas converts each column containing
        `numpy.NaN`s to dtype `float`.)

    scoring_method: Instance of a `StructureScore`-subclass (`K2` is used as default)
        An instance of `K2`, `BDeu`, `BIC` or `AIC`. This score is optimized
        during structure estimation by the `estimate`-method.

    state_names: dict (optional)
        A dict indicating, for each variable, the discrete set of states (or
        values) that the variable can take. If unspecified, the observed
        values in the data set are taken to be the only possible states.

    use_cache: boolean
        If True, uses caching of score for faster computation.
        Note: Caching only works for scoring methods which are decomposable.
        Can give wrong results in case of custom scoring methods.
    """

    def __init__(self, data, scoring_method=None, use_cache=True, **kwargs):
        if scoring_method is None:
            # No score supplied: default to K2, always wrapped in a cache.
            self.scoring_method = ScoreCache.ScoreCache(K2(data, **kwargs), data)
        elif use_cache:
            self.scoring_method = ScoreCache.ScoreCache(scoring_method, data)
        else:
            self.scoring_method = scoring_method

        super(ExhaustiveSearch, self).__init__(data, **kwargs)

    def all_dags(self, nodes=None):
        """
        Generate every directed acyclic graph over the given nodes, sparsest
        first. With `n` nodes, `2**(n*(n-1))` edge subsets are examined, so
        this is likely not feasible for n > 6. This is a generator.

        Parameters
        ----------
        nodes: list of nodes for the DAGs (optional)
            Node names the generated DAGs should carry.
            Taken from the data if not provided.

        Returns
        -------
        dags: Generator object for nx.DiGraphs
            Yields all acyclic nx.DiGraphs, ordered by number of edges
            (empty DAG first).

        Examples
        --------
        >>> import pandas as pd
        >>> from pgmpy.estimators import ExhaustiveSearch
        >>> s = ExhaustiveSearch(pd.DataFrame(data={'Temperature': [23, 19],
                                                    'Weather': ['sunny', 'cloudy'],
                                                    'Humidity': [65, 75]}))
        >>> [dag.edges() for dag in s.all_dags()]
        [[], [('Humidity', 'Temperature')], [('Humidity', 'Weather')],
        [('Temperature', 'Weather')], [('Temperature', 'Humidity')],
        ....
        [('Weather', 'Humidity'), ('Weather', 'Temperature'), ('Temperature', 'Humidity')]]
        """
        if nodes is None:
            nodes = sorted(self.state_names.keys())
        n_nodes = len(nodes)
        if n_nodes > 6:
            logger.info("Generating all DAGs of n nodes likely not feasible for n>6!")
            logger.info(
                "Attempting to search through {n} graphs".format(
                    n=2 ** (n_nodes * (n_nodes - 1))
                )
            )

        # Every ordered pair of distinct nodes is a candidate edge:
        # the unordered pairs first, then their reversals.
        forward = list(combinations(nodes, 2))
        candidate_edges = forward + [(b, a) for (a, b) in forward]

        # Each subset of candidate edges defines one digraph (2^(n*(n-1)) of
        # them); keep only the acyclic ones.
        for edge_subset in powerset(candidate_edges):
            candidate = nx.DiGraph(edge_subset)
            candidate.add_nodes_from(nodes)
            if not nx.is_directed_acyclic_graph(candidate):
                continue
            yield candidate

    def all_scores(self):
        """
        Score every DAG and return (score, dag) pairs ordered by score.

        Returns
        -------
        A list of (score, dag) pairs: list
            (score, dag)-tuples where score is a float and dag an acyclic
            nx.DiGraph. The list is ordered by score values.

        Examples
        --------
        >>> import pandas as pd
        >>> import numpy as np
        >>> from pgmpy.estimators import ExhaustiveSearch, K2
        >>> # create random data sample with 3 variables, where B and C are identical:
        >>> data = pd.DataFrame(np.random.randint(0, 5, size=(5000, 2)), columns=list('AB'))
        >>> data['C'] = data['B']
        >>> searcher = ExhaustiveSearch(data, scoring_method=K2(data))
        >>> for score, model in searcher.all_scores():
        ...     print("{0}\t{1}".format(score, model.edges()))
        """
        score = self.scoring_method.score
        # Sort on the score alone; with a plain tuple sort, equal scores would
        # fall back to comparing graphs, which is not defined.
        return sorted(
            ((score(dag), dag) for dag in self.all_dags()),
            key=lambda pair: pair[0],
        )

    def estimate(self):
        """
        Exhaustively search all DAGs and return the structure that fits the
        data best according to the scoring method supplied in the
        constructor. Only estimates network structure, no parametrization.

        Returns
        -------
        Estimated Model: pgmpy.base.DAG
            A `DAG` with maximal score.

        Examples
        --------
        >>> import pandas as pd
        >>> import numpy as np
        >>> from pgmpy.estimators import ExhaustiveSearch
        >>> # create random data sample with 3 variables, where B and C are identical:
        >>> data = pd.DataFrame(np.random.randint(0, 5, size=(5000, 2)), columns=list('AB'))
        >>> data['C'] = data['B']
        >>> est = ExhaustiveSearch(data)
        >>> est.estimate().edges()
        [('B', 'C')]
        """
        best_dag = max(self.all_dags(), key=self.scoring_method.score)

        # Re-house the winning nx.DiGraph in a pgmpy DAG with deterministic
        # (sorted) node and edge insertion order.
        result = DAG()
        result.add_nodes_from(sorted(best_dag.nodes()))
        result.add_edges_from(sorted(best_dag.edges()))
        return result
|
testbed/pgmpy__pgmpy/pgmpy/estimators/GES.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from itertools import combinations
|
| 2 |
+
|
| 3 |
+
import networkx as nx
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from pgmpy import config
|
| 7 |
+
from pgmpy.base import DAG
|
| 8 |
+
from pgmpy.estimators import (
|
| 9 |
+
AIC,
|
| 10 |
+
BIC,
|
| 11 |
+
K2,
|
| 12 |
+
AICCondGauss,
|
| 13 |
+
AICGauss,
|
| 14 |
+
BDeu,
|
| 15 |
+
BDs,
|
| 16 |
+
BICCondGauss,
|
| 17 |
+
BICGauss,
|
| 18 |
+
LogLikelihoodCondGauss,
|
| 19 |
+
LogLikelihoodGauss,
|
| 20 |
+
ScoreCache,
|
| 21 |
+
StructureEstimator,
|
| 22 |
+
StructureScore,
|
| 23 |
+
)
|
| 24 |
+
from pgmpy.global_vars import logger
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class GES(StructureEstimator):
    """
    Implementation of Greedy Equivalence Search (GES) causal discovery / structure learning algorithm.

    GES is a score-based causal discovery / structure learning algorithm that works in three phases:
    1. Forward phase: New edges are added such that the model score improves.
    2. Backward phase: Edges are removed from the model such that the model score improves.
    3. Edge flipping phase: Edge orientations are flipped such that model score improves.

    Parameters
    ----------
    data: pandas DataFrame object
        dataframe object where each column represents one variable.
        (If some values in the data are missing the data cells should be set to `numpy.nan`.
        Note that pandas converts each column containing `numpy.nan`s to dtype `float`.)

    use_cache: boolean
        If True, uses caching of score for faster computation.
        Note: Caching only works for scoring methods which are decomposable. Can
        give wrong results in case of custom scoring methods.

    References
    ----------
    Chickering, David Maxwell. "Optimal structure identification with greedy search." Journal of machine learning research 3.Nov (2002): 507-554.
    """

    def __init__(self, data, use_cache=True, **kwargs):
        self.use_cache = use_cache

        super(GES, self).__init__(data=data, **kwargs)

    def _legal_edge_additions(self, current_model):
        """
        Returns a list of all edges that can be added to the graph such that it remains a DAG.

        An orientation (u, v) is legal when neither direction of the edge is
        already present and no existing directed path v -> u would close a cycle.
        """
        edges = []
        for u, v in combinations(current_model.nodes(), 2):
            if not (current_model.has_edge(u, v) or current_model.has_edge(v, u)):
                if not nx.has_path(current_model, v, u):
                    edges.append((u, v))
                if not nx.has_path(current_model, u, v):
                    edges.append((v, u))
        return edges

    def _legal_edge_flips(self, current_model):
        """
        Returns a list of all the edges in the `current_model` that can be flipped such that the model
        remains a DAG.
        """
        potential_flips = []
        edges = list(current_model.edges())
        for u, v in edges:
            # Temporarily drop the edge: the reversed edge (v, u) is legal only
            # if no alternative directed path u -> v remains.
            current_model.remove_edge(u, v)
            if not nx.has_path(current_model, u, v):
                potential_flips.append((v, u))

            # Restore the edge to get to the original model
            current_model.add_edge(u, v)
        return potential_flips

    def estimate(self, scoring_method="bic-d", min_improvement=1e-6, debug=False):
        """
        Estimates the DAG from the data.

        Parameters
        ----------
        scoring_method: str or StructureScore instance
            The score to be optimized during structure estimation. Supported
            structure scores: k2, bdeu, bds, bic-d, aic-d, ll-g, aic-g, bic-g,
            ll-cg, aic-cg, bic-cg. Also accepts a custom score, but it should
            be an instance of `StructureScore`.

        min_improvement: float
            The operation (edge addition, removal, or flipping) would only be performed if the
            model score improves by at least `min_improvement`.

        debug: bool (default: False)
            If True, logs every applied operation and its score improvement.

        Returns
        -------
        Estimated model: pgmpy.base.DAG
            A `DAG` at a (local) score maximum.

        Examples
        --------
        >>> # Simulate some sample data from a known model to learn the model structure from
        >>> from pgmpy.utils import get_example_model
        >>> model = get_example_model('alarm')
        >>> df = model.simulate(int(1e3))

        >>> # Learn the model structure using GES algorithm from `df`
        >>> from pgmpy.estimators import GES
        >>> est = GES(df)
        >>> dag = est.estimate(scoring_method='bic-d')
        >>> len(dag.nodes())
        37
        >>> len(dag.edges())
        45
        """

        # Step 0: Initial checks and setup for arguments
        supported_methods = {
            "k2": K2,
            "bdeu": BDeu,
            "bds": BDs,
            "bic-d": BIC,
            "aic-d": AIC,
            "ll-g": LogLikelihoodGauss,
            "aic-g": AICGauss,
            "bic-g": BICGauss,
            "ll-cg": LogLikelihoodCondGauss,
            "aic-cg": AICCondGauss,
            "bic-cg": BICCondGauss,
        }
        if isinstance(scoring_method, str):
            if scoring_method.lower() in [
                "k2score",
                "bdeuscore",
                "bdsscore",
                "bicscore",
                "aicscore",
            ]:
                raise ValueError(
                    "The scoring method names have been changed. Please refer the documentation."
                )
            elif scoring_method.lower() not in list(supported_methods.keys()):
                raise ValueError(
                    "Unknown scoring method. Please refer documentation for a list of supported score metrics."
                )
        elif not isinstance(scoring_method, StructureScore):
            # Bug fix: this message previously listed the removed legacy score
            # names (k2score, bdeuscore, ...); list the supported keys instead.
            raise ValueError(
                "scoring_method should either be one of k2, bdeu, bds, bic-d, aic-d, ll-g, aic-g, bic-g, ll-cg, aic-cg, bic-cg, or an instance of StructureScore"
            )

        if isinstance(scoring_method, str):
            score = supported_methods[scoring_method.lower()](data=self.data)
        else:
            score = scoring_method

        if self.use_cache:
            score_fn = ScoreCache(score, self.data).local_score
        else:
            score_fn = score.local_score

        # Step 1: Initialize an empty model.
        current_model = DAG()
        current_model.add_nodes_from(list(self.data.columns))

        # Step 2: Forward step: Iteratively add edges till score stops improving.
        while True:
            potential_edges = self._legal_edge_additions(current_model)
            score_deltas = np.zeros(len(potential_edges))
            for index, (u, v) in enumerate(potential_edges):
                current_parents = current_model.get_parents(v)
                # Local (decomposable) score change of giving v the extra parent u.
                score_delta = score_fn(v, current_parents + [u]) - score_fn(
                    v, current_parents
                )
                score_deltas[index] = score_delta

            # np.all over an empty array is True, but keep the explicit
            # length check for clarity.
            if (len(potential_edges) == 0) or (np.all(score_deltas < min_improvement)):
                break

            edge_to_add = potential_edges[np.argmax(score_deltas)]
            current_model.add_edge(edge_to_add[0], edge_to_add[1])
            if debug:
                logger.info(
                    f"Adding edge {edge_to_add[0]} -> {edge_to_add[1]}. Improves score by: {score_deltas.max()}"
                )

        # Step 3: Backward Step: Iteratively remove edges till score stops improving.
        while True:
            potential_removals = list(current_model.edges())
            score_deltas = np.zeros(len(potential_removals))

            for index, (u, v) in enumerate(potential_removals):
                current_parents = current_model.get_parents(v)
                score_deltas[index] = score_fn(
                    v, [node for node in current_parents if node != u]
                ) - score_fn(v, current_parents)
            if (len(potential_removals) == 0) or (
                np.all(score_deltas < min_improvement)
            ):
                break
            edge_to_remove = potential_removals[np.argmax(score_deltas)]
            current_model.remove_edge(edge_to_remove[0], edge_to_remove[1])
            if debug:
                logger.info(
                    f"Removing edge {edge_to_remove[0]} -> {edge_to_remove[1]}. Improves score by: {score_deltas.max()}"
                )

        # Step 4: Flip Edges: Iteratively try to flip edges till score stops improving.
        while True:
            potential_flips = self._legal_edge_flips(current_model)
            score_deltas = np.zeros(len(potential_flips))
            for index, (u, v) in enumerate(potential_flips):
                v_parents = current_model.get_parents(v)
                u_parents = current_model.get_parents(u)
                # (u, v) is the flipped orientation of the current edge v -> u:
                # v gains parent u and u loses parent v.
                score_deltas[index] = (
                    score_fn(v, v_parents + [u]) - score_fn(v, v_parents)
                ) + (
                    score_fn(u, [node for node in u_parents if node != v])
                    - score_fn(u, u_parents)
                )

            if (len(potential_flips) == 0) or (np.all(score_deltas < min_improvement)):
                break
            edge_to_flip = potential_flips[np.argmax(score_deltas)]
            current_model.remove_edge(edge_to_flip[1], edge_to_flip[0])
            current_model.add_edge(edge_to_flip[0], edge_to_flip[1])
            if debug:
                # Bug fix: log message typo "Fliping" -> "Flipping".
                logger.info(
                    f"Flipping edge {edge_to_flip[1]} -> {edge_to_flip[0]}. Improves score by: {score_deltas.max()}"
                )

        # Step 5: Return the model.
        return current_model
|
testbed/pgmpy__pgmpy/pgmpy/estimators/HillClimbSearch.py
ADDED
|
@@ -0,0 +1,351 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
from collections import deque
|
| 3 |
+
from itertools import permutations
|
| 4 |
+
|
| 5 |
+
import networkx as nx
|
| 6 |
+
from tqdm.auto import trange
|
| 7 |
+
|
| 8 |
+
from pgmpy import config
|
| 9 |
+
from pgmpy.base import DAG
|
| 10 |
+
from pgmpy.estimators import (
|
| 11 |
+
AIC,
|
| 12 |
+
BIC,
|
| 13 |
+
K2,
|
| 14 |
+
AICCondGauss,
|
| 15 |
+
AICGauss,
|
| 16 |
+
BDeu,
|
| 17 |
+
BDs,
|
| 18 |
+
BICCondGauss,
|
| 19 |
+
BICGauss,
|
| 20 |
+
LogLikelihoodCondGauss,
|
| 21 |
+
LogLikelihoodGauss,
|
| 22 |
+
ScoreCache,
|
| 23 |
+
StructureEstimator,
|
| 24 |
+
StructureScore,
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class HillClimbSearch(StructureEstimator):
    """
    Class for heuristic hill climb searches for DAGs, to learn
    network structure from data. `estimate` attempts to find a model with optimal score.

    Parameters
    ----------
    data: pandas DataFrame object
        dataframe object where each column represents one variable.
        (If some values in the data are missing the data cells should be set to `numpy.nan`.
        Note that pandas converts each column containing `numpy.nan`s to dtype `float`.)

    state_names: dict (optional)
        A dict indicating, for each variable, the discrete set of states (or values)
        that the variable can take. If unspecified, the observed values in the data set
        are taken to be the only possible states.

    use_cache: boolean
        If True, uses caching of score for faster computation.
        Note: Caching only works for scoring methods which are decomposable. Can
        give wrong results in case of custom scoring methods.

    References
    ----------
    Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
    Section 18.4.3 (page 811ff)
    """

    def __init__(self, data, use_cache=True, **kwargs):
        self.use_cache = use_cache

        super(HillClimbSearch, self).__init__(data, **kwargs)

    def _legal_operations(
        self,
        model,
        score,
        structure_score,
        tabu_list,
        max_indegree,
        black_list,
        white_list,
        fixed_edges,
    ):
        """Generates a list of legal (= not in tabu_list) graph modifications
        for a given model, together with their score changes. Possible graph modifications:
        (1) add, (2) remove, or (3) flip a single edge. For details on scoring
        see Koller & Friedman, Probabilistic Graphical Models, Section 18.4.3.3 (page 818).
        If a number `max_indegree` is provided, only modifications that keep the number
        of parents for each node below `max_indegree` are considered. A list of
        edges can optionally be passed as `black_list` or `white_list` to exclude those
        edges or to limit the search.
        """

        tabu_list = set(tabu_list)

        # Step 1: Get all legal operations for adding edges.
        potential_new_edges = (
            set(permutations(self.variables, 2))
            - set(model.edges())
            - set([(Y, X) for (X, Y) in model.edges()])
        )

        for X, Y in potential_new_edges:
            # Adding X -> Y creates a cycle iff a directed path Y -> X already exists.
            if not nx.has_path(model, Y, X):
                operation = ("+", (X, Y))
                if (
                    (operation not in tabu_list)
                    and ((X, Y) not in black_list)
                    and ((X, Y) in white_list)
                ):
                    old_parents = model.get_parents(Y)
                    new_parents = old_parents + [X]
                    if len(new_parents) <= max_indegree:
                        # Decomposable scores: only Y's local score changes.
                        score_delta = score(Y, new_parents) - score(Y, old_parents)
                        score_delta += structure_score("+")
                        yield (operation, score_delta)

        # Step 2: Get all legal operations for removing edges
        for X, Y in model.edges():
            operation = ("-", (X, Y))
            if (operation not in tabu_list) and ((X, Y) not in fixed_edges):
                old_parents = model.get_parents(Y)
                new_parents = [var for var in old_parents if var != X]
                score_delta = score(Y, new_parents) - score(Y, old_parents)
                score_delta += structure_score("-")
                yield (operation, score_delta)

        # Step 3: Get all legal operations for flipping edges
        for X, Y in model.edges():
            # Flipping X -> Y to Y -> X creates a cycle iff some other
            # directed path X ~> Y (longer than the edge itself) exists.
            if not any(
                map(lambda path: len(path) > 2, nx.all_simple_paths(model, X, Y))
            ):
                operation = ("flip", (X, Y))
                if (
                    ((operation not in tabu_list) and ("flip", (Y, X)) not in tabu_list)
                    and ((X, Y) not in fixed_edges)
                    and ((Y, X) not in black_list)
                    and ((Y, X) in white_list)
                ):
                    old_X_parents = model.get_parents(X)
                    old_Y_parents = model.get_parents(Y)
                    new_X_parents = old_X_parents + [Y]
                    new_Y_parents = [var for var in old_Y_parents if var != X]
                    if len(new_X_parents) <= max_indegree:
                        # Both endpoints' local scores change on a flip.
                        score_delta = (
                            score(X, new_X_parents)
                            + score(Y, new_Y_parents)
                            - score(X, old_X_parents)
                            - score(Y, old_Y_parents)
                        )
                        score_delta += structure_score("flip")
                        yield (operation, score_delta)

    def estimate(
        self,
        scoring_method="k2",
        start_dag=None,
        fixed_edges=set(),
        tabu_length=100,
        max_indegree=None,
        black_list=None,
        white_list=None,
        epsilon=1e-4,
        max_iter=1e6,
        show_progress=True,
    ):
        """
        Performs local hill climb search to estimates the `DAG` structure that
        has optimal score, according to the scoring method supplied. Starts at
        model `start_dag` and proceeds by step-by-step network modifications
        until a local maximum is reached. Only estimates network structure, no
        parametrization.

        Parameters
        ----------
        scoring_method: str or StructureScore instance
            The score to be optimized during structure estimation. Supported
            structure scores: k2, bdeu, bds, bic-d, aic-d, ll-g, aic-g, bic-g,
            ll-cg, aic-cg, bic-cg. Also accepts a custom score, but it should
            be an instance of `StructureScore`.

        start_dag: DAG instance
            The starting point for the local search. By default, a completely
            disconnected network is used.

        fixed_edges: iterable
            A list of edges that will always be there in the final learned model.
            The algorithm will add these edges at the start of the algorithm and
            will never change it.

        tabu_length: int
            If provided, the last `tabu_length` graph modifications cannot be reversed
            during the search procedure. This serves to enforce a wider exploration
            of the search space. Default value: 100.

        max_indegree: int or None
            If provided and unequal None, the procedure only searches among models
            where all nodes have at most `max_indegree` parents. Defaults to None.

        black_list: list or None
            If a list of edges is provided as `black_list`, they are excluded from the search
            and the resulting model will not contain any of those edges. Default: None

        white_list: list or None
            If a list of edges is provided as `white_list`, the search is limited to those
            edges. The resulting model will then only contain edges that are in `white_list`.
            Default: None

        epsilon: float (default: 1e-4)
            Defines the exit condition. If the improvement in score is less than `epsilon`,
            the learned model is returned.

        max_iter: int (default: 1e6)
            The maximum number of iterations allowed. Returns the learned model when the
            number of iterations is greater than `max_iter`.

        show_progress: bool (default: True)
            If True, shows a progress bar for the running iterations.

        Returns
        -------
        Estimated model: pgmpy.base.DAG
            A `DAG` at a (local) score maximum.

        Examples
        --------
        >>> # Simulate some sample data from a known model to learn the model structure from
        >>> from pgmpy.utils import get_example_model
        >>> model = get_example_model('alarm')
        >>> df = model.simulate(int(1e3))

        >>> # Learn the model structure using HillClimbSearch algorithm from `df`
        >>> from pgmpy.estimators import HillClimbSearch
        >>> est = HillClimbSearch(data)
        >>> dag = est.estimate(scoring_method='bic-d')
        >>> len(dag.nodes())
        37
        >>> len(dag.edges())
        45
        """

        # Step 1: Initial checks and setup for arguments
        # Step 1.1: Check scoring_method
        supported_methods = {
            "k2": K2,
            "bdeu": BDeu,
            "bds": BDs,
            "bic-d": BIC,
            "aic-d": AIC,
            "ll-g": LogLikelihoodGauss,
            "aic-g": AICGauss,
            "bic-g": BICGauss,
            "ll-cg": LogLikelihoodCondGauss,
            "aic-cg": AICCondGauss,
            "bic-cg": BICCondGauss,
        }

        if isinstance(scoring_method, str):
            # Old-style names are rejected explicitly with a pointer to the docs.
            if scoring_method.lower() in [
                "k2score",
                "bdeuscore",
                "bdsscore",
                "bicscore",
                "aicscore",
            ]:
                raise ValueError(
                    "The scoring method names have been changed. Please refer the documentation."
                )
            elif scoring_method.lower() not in list(supported_methods.keys()):
                raise ValueError(
                    "Unknown scoring method. Please refer documentation for a list of supported score metrics."
                )
        elif not isinstance(scoring_method, StructureScore):
            # Keep the listed names consistent with `supported_methods` above.
            raise ValueError(
                "scoring_method should either be one of k2, bdeu, bds, bic-d, aic-d, ll-g, aic-g, bic-g, ll-cg, aic-cg, bic-cg, or an instance of StructureScore"
            )

        if isinstance(scoring_method, str):
            score = supported_methods[scoring_method.lower()](data=self.data)
        else:
            score = scoring_method

        if self.use_cache:
            score_fn = ScoreCache.ScoreCache(score, self.data).local_score
        else:
            score_fn = score.local_score

        # Step 1.2: Check the start_dag
        if start_dag is None:
            start_dag = DAG()
            start_dag.add_nodes_from(self.variables)
        elif not isinstance(start_dag, DAG) or not set(start_dag.nodes()) == set(
            self.variables
        ):
            raise ValueError(
                "'start_dag' should be a DAG with the same variables as the data set, or 'None'."
            )

        # Step 1.3: Check fixed_edges
        if not hasattr(fixed_edges, "__iter__"):
            raise ValueError("fixed_edges must be an iterable")
        else:
            fixed_edges = set(fixed_edges)
            start_dag.add_edges_from(fixed_edges)
            if not nx.is_directed_acyclic_graph(start_dag):
                raise ValueError(
                    "fixed_edges creates a cycle in start_dag. Please modify either fixed_edges or start_dag."
                )

        # Step 1.4: Check black list and white list
        black_list = set() if black_list is None else set(black_list)
        white_list = (
            set([(u, v) for u in self.variables for v in self.variables])
            if white_list is None
            else set(white_list)
        )

        # Step 1.5: Initialize max_indegree, tabu_list, and progress bar
        if max_indegree is None:
            max_indegree = float("inf")

        tabu_list = deque(maxlen=tabu_length)
        current_model = start_dag

        if show_progress and config.SHOW_PROGRESS:
            iteration = trange(int(max_iter))
        else:
            iteration = range(int(max_iter))

        # Step 2: For each iteration, find the best scoring operation and
        #         do that to the current model. If no legal operation is
        #         possible, sets best_operation=None.
        for _ in iteration:
            best_operation, best_score_delta = max(
                self._legal_operations(
                    current_model,
                    score_fn,
                    score.structure_prior_ratio,
                    tabu_list,
                    max_indegree,
                    black_list,
                    white_list,
                    fixed_edges,
                ),
                key=lambda t: t[1],
                default=(None, None),
            )

            if best_operation is None or best_score_delta < epsilon:
                break
            elif best_operation[0] == "+":
                current_model.add_edge(*best_operation[1])
                # Tabu the inverse operation so the step is not immediately undone.
                tabu_list.append(("-", best_operation[1]))
            elif best_operation[0] == "-":
                current_model.remove_edge(*best_operation[1])
                tabu_list.append(("+", best_operation[1]))
            elif best_operation[0] == "flip":
                X, Y = best_operation[1]
                current_model.remove_edge(X, Y)
                current_model.add_edge(Y, X)
                tabu_list.append(best_operation)

        # Step 3: Return if no more improvements or maximum iterations reached.
        return current_model
|
testbed/pgmpy__pgmpy/pgmpy/estimators/LinearModel.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import statsmodels.api as sm
|
| 2 |
+
from statsmodels.api import OLS, GLS, WLS
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class LinearEstimator(object):
    """
    A simple linear model built on statsmodels.

    Parameters
    ----------
    graph: the model graph (stored for API symmetry; not read here).

    estimator_type: str (default: "linear")
        One of "linear", "OLS", "GLS", or "WLS", selecting the statsmodels
        estimator class to use. "linear" is an alias for "OLS".
    """

    def __init__(self, graph, estimator_type="linear", **kwargs):
        self._supported_models = {"linear": OLS, "OLS": OLS, "GLS": GLS, "WLS": WLS}
        if estimator_type not in self._supported_models.keys():
            raise NotImplementedError(
                "We currently only support OLS, GLS, and WLS. Please specify which you would like to use."
            )
        else:
            # Keep a private reference to the estimator *class* so that `fit`
            # can be called repeatedly: `fit` rebinds the public
            # `self.estimator` attribute to the fitted results object
            # (preserved for backward compatibility with existing callers).
            self._estimator_class = self._supported_models[estimator_type]
            self.estimator = self._estimator_class

    def _model(self, X, Y, Z, data, **kwargs):
        """Build an unfitted statsmodels model of Y regressed on X and the
        adjustment set Z (with an intercept term added)."""
        exog = sm.add_constant(data[[X] + list(Z)])
        endog = data[Y]
        # Use the stored class, not `self.estimator`, which may already have
        # been replaced by a fitted results object by a previous `fit` call.
        return self._estimator_class(endog=endog, exog=exog, **kwargs)

    def fit(self, X, Y, Z, data, **kwargs):
        """Fit the model and record the coefficient of X as the ATE.

        Returns self, so calls can be chained.
        """
        self.estimator = self._model(X, Y, Z, data, **kwargs).fit()
        self.ate = self.estimator.params[X]
        return self

    def _get_ate(self):
        # Average treatment effect: the fitted coefficient of X.
        return self.ate

    def summary(self):
        """Return the statsmodels summary of the last fit."""
        return self.estimator.summary()
|
testbed/pgmpy__pgmpy/pgmpy/estimators/MLE.py
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding:utf-8
|
| 2 |
+
|
| 3 |
+
from itertools import chain
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
from joblib import Parallel, delayed
|
| 7 |
+
|
| 8 |
+
from pgmpy.estimators import ParameterEstimator
|
| 9 |
+
from pgmpy.factors import FactorDict
|
| 10 |
+
from pgmpy.factors.discrete import TabularCPD
|
| 11 |
+
from pgmpy.models import BayesianNetwork, JunctionTree
|
| 12 |
+
from pgmpy.base import DAG
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class MaximumLikelihoodEstimator(ParameterEstimator):
    """
    Class used to compute parameters for a model using Maximum Likelihood Estimation.

    Parameters
    ----------
    model: A pgmpy.models.BayesianNetwork or pgmpy.models.JunctionTree instance

    data: pandas DataFrame object
        DataFrame object with column names identical to the variable names of the network.
        (If some values in the data are missing the data cells should be set to `numpy.nan`.
        Note that pandas converts each column containing `numpy.nan`s to dtype `float`.)

    state_names: dict (optional)
        A dict indicating, for each variable, the discrete set of states
        that the variable can take. If unspecified, the observed values
        in the data set are taken to be the only possible states.

    Examples
    --------
    >>> import numpy as np
    >>> import pandas as pd
    >>> from pgmpy.models import BayesianNetwork
    >>> from pgmpy.estimators import MaximumLikelihoodEstimator
    >>> data = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
    ...                     columns=['A', 'B', 'C', 'D', 'E'])
    >>> model = BayesianNetwork([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
    >>> estimator = MaximumLikelihoodEstimator(model, data)
    """

    def __init__(self, model, data, **kwargs):
        if not isinstance(model, (BayesianNetwork, JunctionTree, DAG)):
            raise NotImplementedError(
                "Maximum Likelihood Estimate is only implemented for BayesianNetwork, JunctionTree, and DAG"
            )

        if isinstance(model, (DAG, BayesianNetwork)):
            if len(model.latents) > 0:
                raise ValueError(
                    f"Found latent variables: {model.latents}. Maximum Likelihood doesn't support latent variables, please use ExpectationMaximization."
                )
            # The node/column check is only meaningful for DAG-like models,
            # whose nodes are variable names matching dataframe columns.
            elif set(model.nodes()) > set(data.columns):
                raise ValueError(
                    f"Nodes detected in the model that are not present in the dataset: {set(model.nodes) - set(data.columns)}. "
                    "Refine the model so that all parameters can be estimated from the data."
                )

        if isinstance(model, JunctionTree):
            # JunctionTree nodes are cliques (tuples of variables); flatten
            # them before comparing with the dataframe columns.
            if len(set(data.columns) - set(chain(*model.nodes()))) != 0:
                raise ValueError(
                    f"Nodes detected in the JunctionTree that are not present in the dataset: {set(data.columns) - set(chain(*model.nodes()))} "
                    "Refine the model to ensure all parameters can be estimated."
                )

        super(MaximumLikelihoodEstimator, self).__init__(model, data, **kwargs)

    def get_parameters(self, n_jobs=1, weighted=False):
        """
        Method to estimate the model parameters using Maximum Likelihood Estimation.

        Parameters
        ----------
        n_jobs: int (default: 1)
            Number of jobs to run in parallel. Default: 1 uses all the processors.
            Using n_jobs > 1 for small models might be slower.

        weighted: bool
            If weighted=True, the data must contain a `_weight` column specifying the
            weight of each datapoint (row). If False, assigns an equal weight to each
            datapoint.

        Returns
        -------
        Estimated parameters: list or pgmpy.factors.FactorDict
            List of pgmpy.factors.discrete.TabularCPDs, one for each variable of the model
            Or a FactorDict representing potential values of a Junction Tree

        Examples
        --------
        >>> import numpy as np
        >>> import pandas as pd
        >>> from pgmpy.models import BayesianNetwork
        >>> from pgmpy.estimators import MaximumLikelihoodEstimator
        >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 4)),
        ...                       columns=['A', 'B', 'C', 'D'])
        >>> model = BayesianNetwork([('A', 'B'), ('C', 'B'), ('C', 'D')])
        >>> estimator = MaximumLikelihoodEstimator(model, values)
        >>> estimator.get_parameters()
        [<TabularCPD representing P(C:2) at 0x7f7b534251d0>,
        <TabularCPD representing P(B:2 | C:2, A:2) at 0x7f7b4dfd4da0>,
        <TabularCPD representing P(A:2) at 0x7f7b4dfd4fd0>,
        <TabularCPD representing P(D:2 | C:2) at 0x7f7b4df822b0>]
        """

        if isinstance(self.model, JunctionTree):
            return self.estimate_potentials()

        parameters = Parallel(n_jobs=n_jobs)(
            delayed(self.estimate_cpd)(node, weighted) for node in self.model.nodes()
        )
        # TODO: A hacky solution to return correct value for the chosen backend. Ref #1675
        parameters = [p.copy() for p in parameters]

        return parameters

    def estimate_cpd(self, node, weighted=False):
        """
        Method to estimate the CPD for a given variable.

        Parameters
        ----------
        node: int, string (any hashable python object)
            The name of the variable for which the CPD is to be estimated.

        weighted: bool
            If weighted=True, the data must contain a `_weight` column specifying the
            weight of each datapoint (row). If False, assigns an equal weight to each
            datapoint.

        Returns
        -------
        Estimated CPD: pgmpy.factors.discrete.TabularCPD
            Estimated CPD for `node`.

        Examples
        --------
        >>> import pandas as pd
        >>> from pgmpy.models import BayesianNetwork
        >>> from pgmpy.estimators import MaximumLikelihoodEstimator
        >>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
        >>> model = BayesianNetwork([('A', 'C'), ('B', 'C')])
        >>> cpd_A = MaximumLikelihoodEstimator(model, data).estimate_cpd('A')
        >>> print(cpd_A)
        ╒══════╤══════════╕
        │ A(0) │ 0.666667 │
        ├──────┼──────────┤
        │ A(1) │ 0.333333 │
        ╘══════╧══════════╛
        >>> cpd_C = MaximumLikelihoodEstimator(model, data).estimate_cpd('C')
        >>> print(cpd_C)
        ╒══════╤══════╤══════╤══════╤══════╕
        │ A    │ A(0) │ A(0) │ A(1) │ A(1) │
        ├──────┼──────┼──────┼──────┼──────┤
        │ B    │ B(0) │ B(1) │ B(0) │ B(1) │
        ├──────┼──────┼──────┼──────┼──────┤
        │ C(0) │ 0.0  │ 0.0  │ 1.0  │ 0.5  │
        ├──────┼──────┼──────┼──────┼──────┤
        │ C(1) │ 1.0  │ 1.0  │ 0.0  │ 0.5  │
        ╘══════╧══════╧══════╧══════╧══════╛
        """

        state_counts = self.state_counts(node, weighted=weighted)

        # if a column contains only `0`s (no states observed for some configuration
        # of parents' states) fill that column uniformly instead
        state_counts.iloc[:, (state_counts.values == 0).all(axis=0)] = 1.0

        parents = sorted(self.model.get_parents(node))
        parents_cardinalities = [len(self.state_names[parent]) for parent in parents]
        node_cardinality = len(self.state_names[node])

        # NOTE(review): the original computed a local `state_names` dict from
        # `state_counts` here but never used it; the CPD is built directly from
        # `self.state_names`, so the dead computation was removed.
        cpd = TabularCPD(
            node,
            node_cardinality,
            np.array(state_counts),
            evidence=parents,
            evidence_card=parents_cardinalities,
            state_names={var: self.state_names[var] for var in chain([node], parents)},
        )
        cpd.normalize()
        return cpd

    def estimate_potentials(self):
        """
        Implements Iterative Proportional Fitting to estimate potentials specifically
        for a Decomposable Undirected Graphical Model. Decomposability is enforced
        by using a Junction Tree.

        Returns
        -------
        Estimated potentials: pgmpy.factors.FactorDict
            Estimated potentials for the entire graphical model.

        References
        ---------
        [1] Kevin P. Murphy, ML Machine Learning - A Probabilistic Perspective
            Algorithm 19.2 Iterative Proportional Fitting algorithm for tabular MRFs & Section 19.5.7.4 IPF for decomposable graphical models.
        [2] Eric P. Xing, Meng Song, Li Zhou, Probabilistic Graphical Models 10-708, Spring 2014.
            https://www.cs.cmu.edu/~epxing/Class/10708-14/scribe_notes/scribe_note_lecture8.pdf.

        Examples
        --------
        >>> import pandas as pd
        >>> from pgmpy.models import JunctionTree
        >>> from pgmpy.estimators import MaximumLikelihoodEstimator
        >>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
        >>> model = JunctionTree()
        >>> model.add_edges_from([(("A", "C"), ("B", "C"))])
        >>> potentials = MaximumLikelihoodEstimator(model, data).estimate_potentials()
        """
        if not isinstance(self.model, JunctionTree):
            raise NotImplementedError(
                "Iterative Proportional Fitting is only implemented for Junction Trees."
            )

        if not hasattr(self.model, "clique_beliefs"):
            raise NotImplementedError(
                "A model containing clique beliefs is required to estimate parameters."
            )

        clique_beliefs = self.model.clique_beliefs

        if not isinstance(clique_beliefs, FactorDict):
            raise TypeError(
                "`UndirectedMaximumLikelihoodEstimator.model.clique_beliefs` must be a `FactorDict`."
            )

        # These are the variables as represented by the `JunctionTree`.
        cliques = list(clique_beliefs.keys())
        empirical_marginals = FactorDict.from_dataframe(df=self.data, marginals=cliques)
        potentials = FactorDict({})
        seen = set()

        # ML Machine Learning - A Probabilistic Perspective
        # Chapter 19, Algorithm 19.2, Page 682:
        # Update each clique by multiplying the potential value by
        # the ratio of the empirical counts over expected counts.
        # Since the potential values are equal to the expected counts
        # for a JunctionTree, we can simplify this to just the empirical counts.
        # This is also described in section 19.5.7.4.
        for clique in cliques:
            # Calculate the running sepset between the new clique and all of the
            # variables we have previously seen.
            variables = tuple(set(clique) - seen)
            seen.update(clique)
            potentials[clique] = empirical_marginals[clique]

            # Divide out the sepset.
            if variables:
                marginalized = empirical_marginals[clique].marginalize(
                    variables=variables, inplace=False
                )
                potentials[clique] = potentials[clique] / marginalized
        return potentials
|
testbed/pgmpy__pgmpy/pgmpy/estimators/MirrorDescentEstimator.py
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
import numpy as np
|
| 3 |
+
from scipy.special import logsumexp
|
| 4 |
+
from tqdm.auto import tqdm
|
| 5 |
+
|
| 6 |
+
from pgmpy.estimators.base import MarginalEstimator
|
| 7 |
+
from pgmpy.factors import FactorDict
|
| 8 |
+
from pgmpy.utils import compat_fns
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class MirrorDescentEstimator(MarginalEstimator):
|
| 12 |
+
"""
|
| 13 |
+
Class for estimation of a undirected graphical model based upon observed
|
| 14 |
+
marginals from a tabular dataset. Estimated parameters are found from an
|
| 15 |
+
entropic mirror descent algorithm for solving convex optimization problems
|
| 16 |
+
over the probability simplex.
|
| 17 |
+
|
| 18 |
+
Parameters
|
| 19 |
+
----------
|
| 20 |
+
model: MarkovNetwork | FactorGraph | JunctionTree
|
| 21 |
+
A model to optimize, using Belief Propagation and an estimation method.
|
| 22 |
+
|
| 23 |
+
data: pandas DataFrame object
|
| 24 |
+
dataframe object where each column represents one variable.
|
| 25 |
+
(If some values in the data are missing the data cells should be set to `numpy.nan`.
|
| 26 |
+
Note that pandas converts each column containing `numpy.nan`s to dtype `float`.)
|
| 27 |
+
|
| 28 |
+
state_names: dict (optional)
|
| 29 |
+
A dict indicating, for each variable, the discrete set of states (or values)
|
| 30 |
+
that the variable can take. If unspecified, the observed values in the data set
|
| 31 |
+
are taken to be the only possible states.
|
| 32 |
+
|
| 33 |
+
References
|
| 34 |
+
----------
|
| 35 |
+
[1] McKenna, Ryan, Daniel Sheldon, and Gerome Miklau.
|
| 36 |
+
"Graphical-model based estimation and inference for differential privacy." In Proceedings of the 36th International Conference on Machine Learning. 2019, Appendix A.1.
|
| 37 |
+
https://arxiv.org/abs/1901.09136.
|
| 38 |
+
[2] Beck, A. and Teboulle, M. Mirror descent and nonlinear projected subgradient methods for convex optimization. Operations Research Letters, 31(3):167–175, 2003
|
| 39 |
+
https://www.sciencedirect.com/science/article/abs/pii/S0167637702002316.
|
| 40 |
+
[3] Wainwright, M. J. and Jordan, M. I.
|
| 41 |
+
Graphical models, exponential families, and variational inference. Foundations and Trends in Machine Learning, 1(1-2):1–305, 2008, Section 3.6 Conjugate Duality: Maximum Likelihood and Maximum Entropy.
|
| 42 |
+
https://people.eecs.berkeley.edu/~wainwrig/Papers/WaiJor08_FTML.pdf
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
def _calibrate(self, theta, n):
|
| 46 |
+
"""
|
| 47 |
+
Wrapper for JunctionTree.calibrate that handles:
|
| 48 |
+
1) getting and setting clique_beliefs
|
| 49 |
+
2) normalizing cliques in log-space
|
| 50 |
+
3) returning marginal values in the original space
|
| 51 |
+
|
| 52 |
+
Parameters
|
| 53 |
+
----------
|
| 54 |
+
theta: FactorDict
|
| 55 |
+
Mapping of clique to factors in a JunctionTree.
|
| 56 |
+
|
| 57 |
+
n: int
|
| 58 |
+
Total number of observations from a dataset.
|
| 59 |
+
|
| 60 |
+
Returns
|
| 61 |
+
-------
|
| 62 |
+
mu: FactorDict
|
| 63 |
+
Mapping of clique to factors representing marginal beliefs.
|
| 64 |
+
"""
|
| 65 |
+
# Assign a new value for theta.
|
| 66 |
+
self.belief_propagation.junction_tree.clique_beliefs = theta
|
| 67 |
+
|
| 68 |
+
# TODO: Currently, belief propagation operates in the original space.
|
| 69 |
+
# To be compatible with this function and for better numerical conditioning,
|
| 70 |
+
# allow calibration to happen in log-space.
|
| 71 |
+
self.belief_propagation.calibrate()
|
| 72 |
+
mu = self.belief_propagation.junction_tree.clique_beliefs
|
| 73 |
+
cliques = list(mu.keys())
|
| 74 |
+
clique = cliques[0]
|
| 75 |
+
|
| 76 |
+
# Normalize each clique (in log-space) for numerical stability
|
| 77 |
+
# and then convert the marginals back to probability space so
|
| 78 |
+
# they are comparable with the observed marginals.
|
| 79 |
+
log_z = logsumexp(mu[clique].values)
|
| 80 |
+
for clique in cliques:
|
| 81 |
+
mu[clique] += np.log(n) - log_z
|
| 82 |
+
mu[clique].values = compat_fns.exp(mu[clique].values)
|
| 83 |
+
return mu
|
| 84 |
+
|
| 85 |
+
    def estimate(
        self,
        marginals,
        metric="L2",
        iterations=100,
        stepsize=None,
        show_progress=True,
    ):
        """
        Estimate the marginals for a given dataset via entropic mirror descent.

        Parameters
        ----------
        marginals: List[Tuple[str, ...]]
            The names of the marginals to be estimated. These marginals must be
            present in the data passed to the `__init__()` method.

        metric: str
            One of either 'L1' or 'L2'. The loss used to compare estimated
            marginals against the observed marginals.

        iterations: int
            The number of iterations to run mirror descent optimization.

        stepsize: Optional[float]
            The step size of each mirror descent gradient step.
            If None, the step size is initialized as
            ``alpha = 1.0 / len(self.data) ** 2`` and a doubling/halving line
            search is conducted each iteration (alpha is doubled at the start
            of an iteration and halved on failed inner steps).
            If a float is given, it is used as a fixed step size and no line
            search is performed.

        show_progress: bool
            Whether to show a tqdm progress bar during optimization.

        Raises
        ------
        ValueError
            If the estimator was constructed without data.

        Notes
        -----
        Estimation occurs in log-space: `theta` holds log-potentials and
        `_calibrate` converts the calibrated beliefs back to probability space.

        Returns
        -------
        Estimated Junction Tree: pgmpy.models.JunctionTree.JunctionTree
            Estimated Junction Tree with potentials optimized to faithfully
            represent `marginals` from a dataset.

        Examples
        --------
        >>> import pandas as pd
        >>> import numpy as np
        >>> from pgmpy.models import FactorGraph
        >>> from pgmpy.factors.discrete import DiscreteFactor
        >>> from pgmpy.estimators import MirrorDescentEstimator
        >>> data = pd.DataFrame(data={"a": [0, 0, 1, 1, 1], "b": [0, 1, 0, 1, 1]})
        >>> model = FactorGraph()
        >>> model.add_nodes_from(["a", "b"])
        >>> phi1 = DiscreteFactor(["a", "b"], [2, 2], np.zeros(4))
        >>> model.add_factors(phi1)
        >>> model.add_edges_from([("a", phi1), ("b", phi1)])
        >>> tree1 = MirrorDescentEstimator(model=model, data=data).estimate(marginals=[("a", "b")])
        >>> print(tree1.factors[0])
        +------+------+------------+
        | a    | b    |   phi(a,b) |
        +======+======+============+
        | a(0) | b(0) |     1.0000 |
        +------+------+------------+
        | a(0) | b(1) |     1.0000 |
        +------+------+------------+
        | a(1) | b(0) |     1.0000 |
        +------+------+------------+
        | a(1) | b(1) |     2.0000 |
        +------+------+------------+
        >>> tree2 = MirrorDescentEstimator(model=model, data=data).estimate(marginals=[("a",)])
        >>> print(tree2.factors[0])
        +------+------+------------+
        | a    | b    |   phi(a,b) |
        +======+======+============+
        | a(0) | b(0) |     1.0000 |
        +------+------+------------+
        | a(0) | b(1) |     1.0000 |
        +------+------+------------+
        | a(1) | b(0) |     1.5000 |
        +------+------+------------+
        | a(1) | b(1) |     1.5000 |
        +------+------+------------+
        """
        # Step 1: Setup variables such as data, step size, and clique to marginal mapping.
        if self.data is None:
            raise ValueError(f"No data was found to fit to the marginals {marginals}")

        n = len(self.data)

        # A user-supplied stepsize disables the per-iteration line search.
        _no_line_search = stepsize is not None
        alpha = stepsize if isinstance(stepsize, float) else 1.0 / n**2

        # Map each clique of the junction tree to the observed marginal(s)
        # it must match; observed marginals come straight from the data.
        clique_to_marginal = self._clique_to_marginal(
            marginals=FactorDict.from_dataframe(df=self.data, marginals=marginals),
            clique_nodes=self.belief_propagation.junction_tree.nodes(),
        )

        # Step 2: Perform calibration to initialize variables.
        # Reuse potentials from a previous `estimate` call (warm start) if any.
        theta = (
            self.theta
            if self.theta
            else self.belief_propagation.junction_tree.clique_beliefs
        )
        mu = self._calibrate(theta=theta, n=n)
        # `answer` is a (loss, gradient) pair throughout the optimization.
        answer = self._marginal_loss(
            marginals=mu, clique_to_marginal=clique_to_marginal, metric=metric
        )

        # Step 3: Optimize the potentials based off the observed marginals.
        pbar = tqdm(range(iterations)) if show_progress else range(iterations)
        for _ in pbar:
            # Snapshot the current iterate: omega = log-potentials,
            # nu = corresponding marginals; the inner loop restarts from these.
            omega, nu = theta, mu
            curr_loss, dL = answer
            if not _no_line_search:
                # Optimistically double the step; the inner loop halves it
                # again whenever the sufficient-decrease test fails.
                alpha *= 2

            if isinstance(pbar, tqdm):
                pbar.set_description_str(
                    ",\t".join(
                        [
                            "Loss: {:e}".format(curr_loss),
                            "Grad Norm: {:e}".format(np.sqrt(dL.dot(dL))),
                            "alpha: {:e}".format(alpha),
                        ]
                    )
                )

            # Backtracking line search: at most 25 halvings per iteration.
            for __ in range(25):
                # Take gradient step.
                theta = omega - alpha * dL

                # Calibrate to propagate gradients through the graph.
                mu = self._calibrate(theta=theta, n=n)

                # Compute the new loss with respect to the updated beliefs.
                answer = self._marginal_loss(
                    marginals=mu, clique_to_marginal=clique_to_marginal, metric=metric
                )
                # If we haven't appreciably improved, try reducing the step size.
                # Otherwise, we break to the next iteration.
                # (sufficient-decrease threshold, cf. McKenna et al. [1], A.1)
                _step = 0.5 * alpha * dL.dot(nu - mu)
                if _no_line_search or curr_loss - answer[0] >= _step:
                    break
                alpha *= 0.5

        # Persist the optimized log-potentials for warm starts and expose the
        # calibrated (probability-space) beliefs on the junction tree.
        self.theta = theta
        self.belief_propagation.junction_tree.clique_beliefs = mu
        return self.belief_propagation.junction_tree
|
testbed/pgmpy__pgmpy/pgmpy/estimators/MmhcEstimator.py
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
from pgmpy.base import UndirectedGraph
|
| 3 |
+
from pgmpy.estimators import BDeu, HillClimbSearch, StructureEstimator
|
| 4 |
+
from pgmpy.estimators.CITests import chi_square
|
| 5 |
+
from pgmpy.independencies import IndependenceAssertion, Independencies
|
| 6 |
+
from pgmpy.models import BayesianNetwork
|
| 7 |
+
from pgmpy.utils.mathext import powerset
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class MmhcEstimator(StructureEstimator):
    """
    Implements the MMHC hybrid structure estimation procedure for
    learning BayesianNetworks from discrete data.

    Parameters
    ----------
    data: pandas DataFrame object
        dataframe object where each column represents one variable.
        (If some values in the data are missing the data cells should be set to `numpy.nan`.
        Note that pandas converts each column containing `numpy.nan`s to dtype `float`.)

    state_names: dict (optional)
        A dict indicating, for each variable, the discrete set of states (or values)
        that the variable can take. If unspecified, the observed values in the data set
        are taken to be the only possible states.

    References
    ----------
    Tsamardinos et al., The max-min hill-climbing Bayesian network structure learning algorithm (2005)
    http://www.dsl-lab.org/supplements/mmhc_paper/paper_online.pdf
    """

    def __init__(self, data, **kwargs):
        super(MmhcEstimator, self).__init__(data, **kwargs)

    def estimate(self, scoring_method=None, tabu_length=10, significance_level=0.01):
        """
        Estimates a BayesianNetwork for the data set, using MMHC. First estimates a
        graph skeleton using MMPC and then orients the edges using score-based local
        search (hill climbing).

        Parameters
        ----------
        significance_level: float, default: 0.01
            The significance level to use for conditional independence tests in the
            data set. See `mmpc`-method.

        scoring_method: instance of a Scoring method (default: BDeu)
            The method to use for scoring during Hill Climb Search. Can be an
            instance of any of the scoring methods implemented in pgmpy.

        tabu_length: int, default: 10
            If provided, the last `tabu_length` graph modifications cannot be
            reversed during the search procedure. This serves to enforce a wider
            exploration of the search space.

        Returns
        -------
        Estimated model: pgmpy.base.DAG
            The estimated model without the parameterization.

        References
        ----------
        Tsamardinos et al., The max-min hill-climbing Bayesian network structure
        learning algorithm (2005), Algorithm 3
        http://www.dsl-lab.org/supplements/mmhc_paper/paper_online.pdf

        Examples
        --------
        >>> import pandas as pd
        >>> import numpy as np
        >>> from pgmpy.estimators import MmhcEstimator
        >>> data = pd.DataFrame(np.random.randint(0, 2, size=(2500, 4)), columns=list('XYZW'))
        >>> data['sum'] = data.sum(axis=1)
        >>> est = MmhcEstimator(data)
        >>> model = est.estimate()
        >>> print(model.edges())
        [('Z', 'sum'), ('X', 'sum'), ('W', 'sum'), ('Y', 'sum')]
        """
        if scoring_method is None:
            scoring_method = BDeu(self.data, equivalent_sample_size=10)

        # Phase 1: constraint-based skeleton discovery (MMPC).
        skel = self.mmpc(significance_level)

        # Phase 2: score-based search, restricted to edges of the skeleton.
        hc = HillClimbSearch(self.data)
        model = hc.estimate(
            scoring_method=scoring_method,
            white_list=skel.to_directed().edges(),
            tabu_length=tabu_length,
        )

        return model

    def mmpc(self, significance_level=0.01):
        """Estimates a graph skeleton (UndirectedGraph) for the data set, using the
        MMPC (max-min parents-and-children) algorithm.

        Parameters
        ----------
        significance_level: float, default=0.01
            The significance level to use for conditional independence tests in
            the data set.

            `significance_level` is the desired Type 1 error probability of
            falsely rejecting the null hypothesis that variables are independent,
            given that they are. The lower `significance_level`, the less likely
            we are to accept dependencies, resulting in a sparser graph.

        Returns
        -------
        skeleton: pgmpy.base.UndirectedGraph
            An estimate for the undirected graph skeleton of the BN underlying
            the data.

        References
        ----------
        Tsamardinos et al., The max-min hill-climbing Bayesian network structure
        learning algorithm (2005), Algorithm 1 & 2
        http://www.dsl-lab.org/supplements/mmhc_paper/paper_online.pdf

        Examples
        --------
        >>> import pandas as pd
        >>> import numpy as np
        >>> from pgmpy.estimators import MmhcEstimator
        >>> data = pd.DataFrame(np.random.randint(0, 2, size=(5000, 5)), columns=list('ABCDE'))
        >>> data['F'] = data['A'] + data['B'] + data['C']
        >>> est = MmhcEstimator(data)
        >>> skel = est.mmpc()
        >>> skel.edges()
        [('A', 'F'), ('B', 'F'), ('C', 'F')]
        """

        nodes = self.state_names.keys()

        def assoc(X, Y, Zs):
            """Measure for (conditional) association between variables. Use negative
            p-value of independence test.
            """
            return 1 - chi_square(X, Y, Zs, self.data, boolean=False)[1]

        def min_assoc(X, Y, Zs):
            "Minimal association of X, Y given any subset of Zs."
            return min(assoc(X, Y, Zs_subset) for Zs_subset in powerset(Zs))

        def max_min_heuristic(X, Zs):
            "Finds variable that maximizes min_assoc with `X` relative to `Zs`."
            max_min_assoc = 0
            best_Y = None

            for Y in set(nodes) - set(Zs + [X]):
                min_assoc_val = min_assoc(X, Y, Zs)
                if min_assoc_val >= max_min_assoc:
                    best_Y = Y
                    max_min_assoc = min_assoc_val

            return (best_Y, max_min_assoc)

        # Find parents and children for each node
        neighbors = dict()
        for node in nodes:
            neighbors[node] = []

            # Forward Phase: greedily add the candidate with the largest
            # minimal association until no dependent candidate remains.
            while True:
                new_neighbor, new_neighbor_min_assoc = max_min_heuristic(
                    node, neighbors[node]
                )
                if new_neighbor_min_assoc > 0:
                    neighbors[node].append(new_neighbor)
                else:
                    break

            # Backward Phase: drop any neighbor that is conditionally
            # independent of `node` given some subset of the other neighbors.
            # Bug fix: iterate over a snapshot — removing from the list being
            # iterated silently skips the element following each removal.
            for neigh in list(neighbors[node]):
                other_neighbors = [n for n in neighbors[node] if n != neigh]
                for sep_set in powerset(other_neighbors):
                    if chi_square(
                        X=node,
                        Y=neigh,
                        Z=sep_set,
                        data=self.data,
                        significance_level=significance_level,
                    ):
                        neighbors[node].remove(neigh)
                        break

        # Correct for false positives: keep an edge only if it was discovered
        # in both directions. Bug fix: filter against a snapshot instead of
        # removing from the lists while iterating over them, which both skipped
        # elements and made the result depend on node iteration order.
        discovered = {node: list(neighbors[node]) for node in nodes}
        for node in nodes:
            neighbors[node] = [
                neigh for neigh in discovered[node] if node in discovered[neigh]
            ]

        skel = UndirectedGraph()
        skel.add_nodes_from(nodes)
        for node in nodes:
            skel.add_edges_from([(node, neigh) for neigh in neighbors[node]])

        return skel
|
testbed/pgmpy__pgmpy/pgmpy/estimators/SEMEstimator.py
ADDED
|
@@ -0,0 +1,451 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import statsmodels.api as sm
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
from pgmpy import config
|
| 7 |
+
from pgmpy.models import SEM, SEMAlg, SEMGraph
|
| 8 |
+
from pgmpy.utils import optimize, pinverse
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class SEMEstimator(object):
    """
    Base class of SEM estimators. All the estimators inherit this class.

    Parameters
    ----------
    model: SEMGraph | SEM | SEMAlg
        The model whose parameters are to be estimated. `SEMGraph` / `SEM`
        instances are converted to their LISREL (`SEMAlg`) representation.

    Raises
    ------
    ValueError
        If the pgmpy backend is numpy (torch is required for the
        gradient-based optimization used here), or if `model` is of an
        unsupported type.
    """

    def __init__(self, model):
        if config.BACKEND == "numpy":
            raise ValueError(
                "SEMEstimator requires torch backend. Currently it's numpy. Call pgmpy.config.set_backend('torch') to switch"
            )

        if isinstance(model, (SEMGraph, SEM)):
            self.model = model.to_lisrel()
        elif isinstance(model, SEMAlg):
            self.model = model
        else:
            raise ValueError(
                f"Model should be an instance of either SEMGraph or SEMAlg class. Got type: {type(model)}"
            )

        def _const_tensor(values):
            # Non-trainable tensor on the configured device/dtype.
            return torch.tensor(
                values,
                device=config.DEVICE,
                dtype=config.DTYPE,
                requires_grad=False,
            )

        # Initialize trainable and fixed mask tensors. Masks select which
        # entries of B / zeta are free parameters; the fixed masks carry the
        # values of entries that are held constant.
        self.B_mask = _const_tensor(self.model.B_mask)
        self.zeta_mask = _const_tensor(self.model.zeta_mask)
        self.B_fixed_mask = _const_tensor(self.model.B_fixed_mask)
        self.zeta_fixed_mask = _const_tensor(self.model.zeta_fixed_mask)

        # Selection matrix projecting latent structure onto observed variables.
        self.wedge_y = _const_tensor(self.model.wedge_y)
        self.B_eye = torch.eye(
            self.B_mask.shape[0],
            device=config.DEVICE,
            dtype=config.DTYPE,
            requires_grad=False,
        )

    def _get_implied_cov(self, B, zeta):
        """
        Computes the model-implied covariance matrix of the observed variables
        from the given (unmasked) parameter matrices.
        """
        B_masked = torch.mul(B, self.B_mask) + self.B_fixed_mask
        B_inv = pinverse(self.B_eye - B_masked)
        zeta_masked = torch.mul(zeta, self.zeta_mask) + self.zeta_fixed_mask

        return self.wedge_y @ B_inv @ zeta_masked @ B_inv.t() @ self.wedge_y.t()

    def ml_loss(self, params, loss_args):
        r"""
        Method to compute the Maximum Likelihood loss function. The optimizer calls this
        method after each iteration with updated params to compute the new loss.

        The fitting function for ML is:
        .. math:: F_{ML} = \log |\Sigma(\theta)| + tr(S \Sigma^{-1}(\theta)) - \log S - (p+q)

        Parameters
        ----------
        params: dict
            params contain all the variables which are updated in each iteration of the
            optimization.

        loss_args: dict
            loss_args contain all the variables which are not updated in each iteration
            but are required to compute the loss. Must contain key `S` (the sample
            covariance).

        Returns
        -------
        torch.tensor: The loss value for the given params and loss_args
        """
        S = loss_args["S"]
        sigma = self._get_implied_cov(params["B"], params["zeta"])

        return (
            # Clamp keeps the determinant positive so its log is finite.
            sigma.det().clamp(min=1e-4).log()
            + (S @ pinverse(sigma)).trace()
            - S.logdet()
            - len(self.model.y)
        )

    def uls_loss(self, params, loss_args):
        r"""
        Method to compute the Unweighted Least Squares fitting function. The optimizer
        calls this method after each iteration with updated params to compute the new
        loss.

        The fitting function for ULS is:
        .. math:: F_{ULS} = tr[(S - \Sigma(\theta))^2]

        Parameters
        ----------
        params: dict
            params contain all the variables which are updated in each iteration of the
            optimization.

        loss_args: dict
            loss_args contain all the variables which are not updated in each iteration
            but are required to compute the loss. Must contain key `S`.

        Returns
        -------
        torch.tensor: The loss value for the given params and loss_args
        """
        S = loss_args["S"]
        sigma = self._get_implied_cov(params["B"], params["zeta"])
        return (S - sigma).pow(2).trace()

    def gls_loss(self, params, loss_args):
        r"""
        Method to compute the Generalized (weighted) Least Squares fitting function.
        The optimizer calls this method after each iteration with updated params to
        compute the new loss.

        The fitting function for GLS is:
        .. math:: F_{GLS} = tr \{ [(S - \Sigma(\theta)) W^{-1}]^2 \}

        Parameters
        ----------
        params: dict
            params contain all the variables which are updated in each iteration of the
            optimization.

        loss_args: dict
            loss_args contain all the variables which are not updated in each iteration
            but are required to compute the loss. Must contain keys `S` and `W`
            (the weight matrix).

        Returns
        -------
        torch.tensor: The loss value for the given params and loss_args
        """
        S = loss_args["S"]
        W_inv = pinverse(loss_args["W"])
        sigma = self._get_implied_cov(params["B"], params["zeta"])
        return ((S - sigma) @ W_inv).pow(2).trace()

    def get_init_values(self, data, method):
        """
        Computes the starting values for the optimizer.

        Parameters
        ----------
        data: pd.DataFrame
            Dataset used to derive scale-based starting values (only read when
            ``method == "std"``).

        method: str ("random" | "std" | "iv")
            Initialization strategy. "iv" is not implemented yet.

        Returns
        -------
        (np.ndarray, np.ndarray): Initial values of B and zeta.

        Reference
        ---------
        .. [1] Table 4C.1: Bollen, K. (2014). Structural Equations with Latent Variables.
               New York, NY: John Wiley & Sons.
        """
        # Initialize all the values even if the edge doesn't exist; the masks
        # applied later zero out the irrelevant entries.
        a = 0.4
        scaling_vars = self.model.to_SEMGraph().get_scaling_indicators()
        eta, m = self.model.eta, len(self.model.eta)

        if method == "random":
            B = np.random.rand(m, m)
            zeta = np.random.rand(m, m)

        elif method == "std":
            # Add observed vars to `scaling_vars` pointing to themselves.
            # Trick to keep the code below uniform for latent and observed vars.
            for observed_var in self.model.y:
                scaling_vars[observed_var] = observed_var

            B = np.random.rand(m, m)
            for i in range(m):
                for j in range(m):
                    if scaling_vars[eta[i]] == eta[j]:
                        B[i, j] = 1.0
                    elif i != j:
                        B[i, j] = a * (
                            data.loc[:, scaling_vars[eta[i]]].std()
                            / data.loc[:, scaling_vars[eta[j]]].std()
                        )
            zeta = np.random.rand(m, m)
            for i in range(m):
                zeta[i, i] = a * ((data.loc[:, scaling_vars[eta[i]]].std()) ** 2)
            # NOTE(review): this loop also visits i == j, rescaling the diagonal
            # by `a` (sqrt(z_ii * z_ii) == z_ii). Kept as-is to preserve the
            # original initialization behavior — confirm against [1].
            for i in range(m):
                for j in range(m):
                    zeta[i, j] = zeta[j, i] = a * np.sqrt(zeta[i, i] * zeta[j, j])

        elif method.lower() == "iv":
            raise NotImplementedError("IV initialization not supported yet.")

        else:
            # Fail fast with a clear message instead of an UnboundLocalError
            # on the return statement below.
            raise ValueError(f"Unknown initialization method: {method}")

        return B, zeta

    def fit(
        self,
        data,
        method,
        opt="adam",
        init_values="random",
        exit_delta=1e-4,
        max_iter=1000,
        **kwargs,
    ):
        """
        Estimate the parameters of the model from the data.

        Parameters
        ----------
        data: pandas DataFrame or pgmpy.data.Data instance
            The data from which to estimate the parameters of the model. Its
            columns must match the model's observed variables exactly.

        method: str ("ml"|"uls"|"gls"|"2sls")
            The fitting function to use.
            ML : Maximum Likelihood
            ULS: Unweighted Least Squares
            GLS: Generalized Least Squares
            2sls: 2-SLS estimator (not implemented yet; raises NotImplementedError)

        opt: str (default: "adam")
            The optimizer to use, passed through to `pgmpy.utils.optimize`.

        init_values: str or dict
            Options for str: random | std | iv
            dict: dictionary with keys `B` and `zeta`.

        exit_delta: float (default: 1e-4)
            Convergence threshold passed through to the optimizer.

        max_iter: int (default: 1000)
            Maximum number of optimization iterations.

        **kwargs: dict
            Extra parameters required in case of some estimators.
            GLS:
                W: np.array (n x n) where n is the number of observed variables.
            2sls:
                x:
                y:

        Returns
        -------
        dict: Goodness-of-fit summary with keys "Sample Size",
            "Sample Covariance", "Model Implied Covariance", "Residual",
            "Normalized Residual", "chi_square" and "dof". The learned
            parameters are written back into `self.model` as a side effect.

        References
        ----------
        .. [1] Bollen, K. A. (2010). Structural equations with latent variables. New York: Wiley.
        """
        # Check if given arguments are valid
        if not isinstance(data, pd.DataFrame):
            raise ValueError(f"data must be a pandas DataFrame. Got type: {type(data)}")

        if not sorted(data.columns) == sorted(self.model.y):
            # Bug fix: the message previously formatted `self.model.observed`,
            # which does not match the attribute actually being compared.
            raise ValueError(
                f"The column names data do not match the variables in the model. Expected: {sorted(self.model.y)}. Got: {sorted(data.columns)}"
            )

        # Initialize the values of parameters as tensors.
        if isinstance(init_values, dict):
            B_init, zeta_init = init_values["B"], init_values["zeta"]
        else:
            B_init, zeta_init = self.get_init_values(data, method=init_values.lower())
        B = torch.tensor(
            B_init, device=config.DEVICE, dtype=config.DTYPE, requires_grad=True
        )
        zeta = torch.tensor(
            zeta_init, device=config.DEVICE, dtype=config.DTYPE, requires_grad=True
        )

        # Compute the covariance of the data, reordered to the model's
        # variable ordering so it aligns with the implied covariance.
        variable_order = self.model.y
        S = data.cov().reindex(variable_order, axis=1).reindex(variable_order, axis=0)
        S = torch.tensor(
            S.values, device=config.DEVICE, dtype=config.DTYPE, requires_grad=False
        )

        # Optimize the parameters with the requested fitting function.
        if method.lower() == "ml":
            params = optimize(
                self.ml_loss,
                params={"B": B, "zeta": zeta},
                loss_args={"S": S},
                opt=opt,
                exit_delta=exit_delta,
                max_iter=max_iter,
            )

        elif method.lower() == "uls":
            params = optimize(
                self.uls_loss,
                params={"B": B, "zeta": zeta},
                loss_args={"S": S},
                opt=opt,
                exit_delta=exit_delta,
                max_iter=max_iter,
            )

        elif method.lower() == "gls":
            W = torch.tensor(
                kwargs["W"],
                device=config.DEVICE,
                dtype=config.DTYPE,
                requires_grad=False,
            )
            params = optimize(
                self.gls_loss,
                params={"B": B, "zeta": zeta},
                loss_args={"S": S, "W": W},
                opt=opt,
                exit_delta=exit_delta,
                max_iter=max_iter,
            )

        elif method.lower() == "2sls" or method.lower() == "2-sls":
            raise NotImplementedError("2-SLS is not implemented yet")

        else:
            # Fail fast with a clear message instead of an UnboundLocalError
            # when `params` is referenced below.
            raise ValueError(f"Unknown fitting method: {method}")

        # Apply the masks to obtain the final structural parameter matrices.
        B = params["B"] * self.B_mask + self.B_fixed_mask
        zeta = params["zeta"] * self.zeta_mask + self.zeta_fixed_mask

        # Compute goodness of fit statistics.
        N = data.shape[0]
        sample_cov = S.detach().numpy()
        sigma_hat = self._get_implied_cov(B, zeta).detach().numpy()
        residual = sample_cov - sigma_hat

        # Residuals scaled by their asymptotic standard errors (cf. [1]).
        norm_residual = np.zeros(residual.shape)
        for i in range(norm_residual.shape[0]):
            for j in range(norm_residual.shape[1]):
                norm_residual[i, j] = (sample_cov[i, j] - sigma_hat[i, j]) / np.sqrt(
                    ((sigma_hat[i, i] * sigma_hat[j, j]) + (sigma_hat[i, j] ** 2)) / N
                )

        # Compute chi-square value.
        # NOTE(review): `S` is a torch tensor while `sigma_hat` is numpy here;
        # numpy coerces the tensor on the fly. Kept as in the original.
        likelihood_ratio = -(N - 1) * (
            np.log(np.linalg.det(sigma_hat))
            + (np.linalg.inv(sigma_hat) @ S).trace()
            - np.log(np.linalg.det(S))
            - S.shape[0]
        )
        if method.lower() == "ml":
            error = self.ml_loss(params, loss_args={"S": S})
        elif method.lower() == "uls":
            error = self.uls_loss(params, loss_args={"S": S})
        elif method.lower() == "gls":
            error = self.gls_loss(params, loss_args={"S": S, "W": W})
        chi_square = likelihood_ratio / error.detach().numpy()

        free_params = self.B_mask.sum()
        dof = ((S.shape[0] * (S.shape[0] + 1)) / 2) - free_params

        summary = {
            "Sample Size": N,
            "Sample Covariance": sample_cov,
            "Model Implied Covariance": sigma_hat,
            "Residual": residual,
            "Normalized Residual": norm_residual,
            "chi_square": chi_square,
            "dof": dof,
        }

        # Update the model with the learned params.
        # Bug fix: `zeta` was previously set from params["B"] as well.
        self.model.set_params(
            B=params["B"].detach().numpy(), zeta=params["zeta"].detach().numpy()
        )
        return summary
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
class IVEstimator:
    """
    Estimator for single parameters of an SEM using Instrumental Variables (IV).

    Parameters
    ----------
    model: pgmpy.models.SEM
        The model for which estimation need to be done.

    Examples
    --------
    """

    def __init__(self, model):
        self.model = model

    def fit(self, X, Y, data, ivs=None, civs=None):
        """
        Estimates the parameter X -> Y.

        Parameters
        ----------
        X: str
            The covariate variable of the parameter being estimated.

        Y: str
            The predictor variable of the parameter being estimated.

        data: pd.DataFrame
            The data from which to learn the parameter. The DataFrame is not
            modified; fit works on an internal copy.

        ivs: List (default: None)
            List of variable names which should be used as Instrumental Variables (IV).
            If not specified, tries to find the IVs from the model structure, fails if
            can't find either IV or Conditional IV.

        civs: List of tuples (tuple form: (var, coditional_var))
            List of conditional IVs to use for estimation.
            If not specified, tries to find the IVs from the model structure, fails if
            can't find either IV or Conditional IVs.

        Returns
        -------
        (float, statsmodels RegressionResults): The estimated parameter value
            for X -> Y, and the full second-stage regression summary.

        Examples
        --------
        >>> from pgmpy.estimators import IVEstimator # TODO: Finish example.
        """
        # Auto-detect IVs/CIVs from the model structure only when neither was
        # supplied by the caller.
        if (ivs is None) and (civs is None):
            ivs = self.model.get_ivs(X, Y)
            civs = self.model.get_conditional_ivs(X, Y)

        # Either argument may still be None if the caller supplied only the
        # other one; normalize so the code below never iterates over None.
        ivs = [] if ivs is None else list(ivs)
        civs = [] if civs is None else list(civs)

        # Drop conditional IVs whose variable is already a plain IV.
        civs = [civ for civ in civs if civ[0] not in ivs]

        # Observed parents of X are used as regression covariates.
        reg_covars = [
            var for var in self.model.graph.predecessors(X)
            if var in self.model.observed
        ]

        # Collect the conditioning variables of every conditional IV.
        civ_conditionals = []
        for civ in civs:
            civ_conditionals.extend(civ[1])

        # Work on a copy so the caller's DataFrame is not mutated by the
        # X_pred column added below.
        data = data.copy()

        # First stage regression: X on its observed parents + CIV conditionals.
        params = (
            sm.OLS(data.loc[:, X], data.loc[:, reg_covars + civ_conditionals])
            .fit()
            .params
        )

        # Predicted value of X from the first-stage coefficients (only the
        # observed-parent terms contribute to the prediction).
        data["X_pred"] = np.zeros(data.shape[0])
        for var in reg_covars:
            data.X_pred += params[var] * data.loc[:, var]

        # Second stage regression: Y on predicted X + CIV conditionals.
        summary = sm.OLS(
            data.loc[:, Y], data.loc[:, ["X_pred"] + civ_conditionals]
        ).fit()
        return summary.params["X_pred"], summary
|
testbed/pgmpy__pgmpy/pgmpy/estimators/TreeSearch.py
ADDED
|
@@ -0,0 +1,390 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
from itertools import combinations
|
| 4 |
+
|
| 5 |
+
import networkx as nx
|
| 6 |
+
import numpy as np
|
| 7 |
+
import pandas as pd
|
| 8 |
+
from joblib import Parallel, delayed
|
| 9 |
+
from sklearn.metrics import (
|
| 10 |
+
adjusted_mutual_info_score,
|
| 11 |
+
mutual_info_score,
|
| 12 |
+
normalized_mutual_info_score,
|
| 13 |
+
)
|
| 14 |
+
from tqdm.auto import tqdm
|
| 15 |
+
|
| 16 |
+
from pgmpy import config
|
| 17 |
+
from pgmpy.base import DAG
|
| 18 |
+
from pgmpy.estimators import StructureEstimator
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class TreeSearch(StructureEstimator):
    """
    Search class for learning tree related graph structure. The algorithms
    supported are Chow-Liu and Tree-augmented naive bayes (TAN).

    Chow-Liu constructs the maximum-weight spanning tree with mutual information
    score as edge weights.

    TAN is an extension of Naive Bayes classifier to allow a tree structure over
    the independent variables to account for interaction.

    Parameters
    ----------
    data: pandas.DataFrame object
        dataframe object where each column represents one variable.

    root_node: str, int, or any hashable python object, default is None.
        The root node of the tree structure. If None then root node is auto-picked
        as the node with the highest sum of edge weights.

    n_jobs: int (default: -1)
        Number of jobs to run in parallel. `-1` means use all processors.

    References
    ----------
    [1] Chow, C. K.; Liu, C.N. (1968), "Approximating discrete probability
    distributions with dependence trees", IEEE Transactions on Information
    Theory, IT-14 (3): 462–467

    [2] Friedman N, Geiger D and Goldszmidt M (1997). Bayesian network classifiers.
    Machine Learning 29: 131–163
    """

    def __init__(self, data, root_node=None, n_jobs=-1, **kwargs):
        if root_node is not None and root_node not in data.columns:
            raise ValueError(f"Root node: {root_node} not found in data columns.")

        self.data = data
        self.root_node = root_node
        self.n_jobs = n_jobs

        super(TreeSearch, self).__init__(data, **kwargs)

    @staticmethod
    def _resolve_edge_weights_fn(edge_weights_fn):
        """
        Resolve `edge_weights_fn` to a callable.

        Accepts one of the known string identifiers ('mutual_info',
        'adjusted_mutual_info', 'normalized_mutual_info') or a callable of the
        form fun(array, array). Raises ValueError for anything else.
        """
        if callable(edge_weights_fn):
            return edge_weights_fn
        named_fns = {
            "mutual_info": mutual_info_score,
            "adjusted_mutual_info": adjusted_mutual_info_score,
            "normalized_mutual_info": normalized_mutual_info_score,
        }
        if isinstance(edge_weights_fn, str) and edge_weights_fn in named_fns:
            return named_fns[edge_weights_fn]
        # Fixed: the original message used a misplaced f-prefix
        # ("Got: f{...}") which printed a literal `f` and never interpolated.
        raise ValueError(
            f"edge_weights_fn should either be 'mutual_info', 'adjusted_mutual_info', "
            f"'normalized_mutual_info' or a function of form fun(array, array). Got: {edge_weights_fn}"
        )

    def estimate(
        self,
        estimator_type="chow-liu",
        class_node=None,
        edge_weights_fn="mutual_info",
        show_progress=True,
    ):
        """
        Estimate the `DAG` structure that fits best to the given data set without
        parametrization.

        Parameters
        ----------
        estimator_type: str (chow-liu | tan)
            The algorithm to use for estimating the DAG.

        class_node: string, int or any hashable python object. (optional)
            Needed only if estimator_type = 'tan'. In the estimated DAG, there would be
            edges from class_node to each of the feature variables.

        edge_weights_fn: str or function (default: mutual info)
            Method to use for computing edge weights. By default, Mutual Info Score is
            used.

        show_progress: boolean
            If True, shows a progress bar for the running algorithm.

        Returns
        -------
        Estimated Model: pgmpy.base.DAG
            The estimated model structure.

        Examples
        --------
        >>> import numpy as np
        >>> import pandas as pd
        >>> import networkx as nx
        >>> from pgmpy.estimators import TreeSearch
        >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
        ...                       columns=['A', 'B', 'C', 'D', 'E'])
        >>> est = TreeSearch(values, root_node='B')
        >>> model = est.estimate(estimator_type='chow-liu')
        >>> est = TreeSearch(values)
        >>> model = est.estimate(estimator_type='tan', class_node='A')
        """
        # Step 1. Argument checks
        # Step 1.1: Only chow-liu and tan allowed as estimator type.
        if estimator_type not in {"chow-liu", "tan"}:
            raise ValueError(
                f"Invalid estimator_type. Expected either chow-liu or tan. Got: {estimator_type}"
            )

        # Step 1.2: If estimator_type=tan, class_node must be specified and
        # must exist in the data.
        if estimator_type == "tan" and class_node is None:
            raise ValueError(
                "class_node argument must be specified for estimator_type='tan'"
            )
        if estimator_type == "tan" and class_node not in self.data.columns:
            raise ValueError(f"Class node: {class_node} not found in data columns")

        # Step 1.3: If root_node isn't specified, pick the node with the
        # highest sum of edge weights. Remember whether weights were computed
        # so Step 2 can reuse them for chow-liu.
        weights_computed = False
        if self.root_node is None:
            weights = TreeSearch._get_weights(
                self.data, edge_weights_fn, self.n_jobs, show_progress
            )
            weights_computed = True
            sum_weights = weights.sum(axis=0)
            maxw_idx = np.argsort(sum_weights)[::-1]
            self.root_node = self.data.columns[maxw_idx[0]]

        # Step 2. Compute all edge weights.
        if estimator_type == "chow-liu":
            if not weights_computed:
                weights = TreeSearch._get_weights(
                    self.data, edge_weights_fn, self.n_jobs, show_progress
                )
        else:
            weights = TreeSearch._get_conditional_weights(
                self.data, class_node, edge_weights_fn, self.n_jobs, show_progress
            )

        # Step 3: If estimator_type = "chow-liu", estimate the DAG and return.
        if estimator_type == "chow-liu":
            return TreeSearch._create_tree_and_dag(
                weights, self.data.columns, self.root_node
            )

        # Step 4: If estimator_type = "tan":
        elif estimator_type == "tan":
            # Step 4.1: Checks root_node != class_node
            if self.root_node == class_node:
                raise ValueError(
                    f"Root node: {self.root_node} and class node: {class_node} are identical"
                )

            # Step 4.2: Construct chow-liu DAG on {data.columns - class_node}
            class_node_idx = np.where(self.data.columns == class_node)[0][0]
            weights = np.delete(weights, class_node_idx, axis=0)
            weights = np.delete(weights, class_node_idx, axis=1)
            reduced_columns = np.delete(self.data.columns, class_node_idx)
            D = TreeSearch._create_tree_and_dag(
                weights, reduced_columns, self.root_node
            )

            # Step 4.3: Add edges from class_node to all other nodes.
            D.add_edges_from([(class_node, node) for node in reduced_columns])
            return D

    @staticmethod
    def _get_weights(
        data, edge_weights_fn="mutual_info", n_jobs=-1, show_progress=True
    ):
        """
        Helper function to Chow-Liu algorithm for estimating tree structure from given data. Refer to
        pgmpy.estimators.TreeSearch for more details. This function returns the edge weights matrix.

        Parameters
        ----------
        data: pandas.DataFrame object
            dataframe object where each column represents one variable.

        edge_weights_fn: str or function (default: mutual_info)
            Method to use for computing edge weights. Options are:
                1. 'mutual_info': Mutual Information Score.
                2. 'adjusted_mutual_info': Adjusted Mutual Information Score.
                3. 'normalized_mutual_info': Normalized Mutual Information Score.
                4. function(array[n_samples,], array[n_samples,]): Custom function.

        n_jobs: int (default: -1)
            Number of jobs to run in parallel. `-1` means use all processors.

        show_progress: boolean
            If True, shows a progress bar for the running algorithm.

        Returns
        -------
        weights: numpy 2D array, shape = (n_columns, n_columns)
            symmetric matrix where each element represents an edge weight.

        Examples
        --------
        >>> import numpy as np
        >>> import pandas as pd
        >>> from pgmpy.estimators import TreeSearch
        >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
        ...                       columns=['A', 'B', 'C', 'D', 'E'])
        >>> est = TreeSearch(values, root_node='B')
        >>> model = est.estimate(estimator_type='chow-liu')
        """
        # Step 0: Check for edge weight computation method.
        edge_weights_fn = TreeSearch._resolve_edge_weights_fn(edge_weights_fn)

        # Step 1: Compute edge weights for a fully connected graph.
        n_vars = len(data.columns)
        pbar = combinations(data.columns, 2)
        if show_progress and config.SHOW_PROGRESS:
            pbar = tqdm(pbar, total=(n_vars * (n_vars - 1) / 2), desc="Building tree")

        vals = Parallel(n_jobs=n_jobs)(
            delayed(edge_weights_fn)(data.loc[:, u], data.loc[:, v]) for u, v in pbar
        )
        # Fill both triangles of the symmetric weight matrix from the
        # pairwise scores (pairs come in upper-triangular order).
        weights = np.zeros((n_vars, n_vars))
        indices = np.triu_indices(n_vars, k=1)
        weights[indices] = vals
        weights.T[indices] = vals

        return weights

    @staticmethod
    def _get_conditional_weights(
        data, class_node, edge_weights_fn="mutual_info", n_jobs=-1, show_progress=True
    ):
        """
        Helper function to TAN (Tree Augmented Naive Bayes) algorithm for
        estimating tree structure from given data. Refer to
        pgmpy.estimators.TreeSearch for more details. This function returns the
        edge weights matrix.

        Parameters
        ----------
        data: pandas.DataFrame object
            dataframe object where each column represents one variable.

        class_node: str
            The class node for TAN. The edge weight is computed as I(X, Y | class_node).

        edge_weights_fn: str or function (default: mutual_info)
            Method to use for computing edge weights. Options are:
                1. 'mutual_info': Mutual Information Score.
                2. 'adjusted_mutual_info': Adjusted Mutual Information Score.
                3. 'normalized_mutual_info': Normalized Mutual Information Score.
                4. function(array[n_samples,], array[n_samples,]): Custom function.

        n_jobs: int (default: -1)
            Number of jobs to run in parallel. `-1` means use all processors.

        show_progress: boolean
            If True, shows a progress bar for the running algorithm.

        Returns
        -------
        weights: numpy 2D array, shape = (n_columns, n_columns)
            symmetric matrix where each element represents an edge weight.

        Examples
        --------
        >>> import numpy as np
        >>> import pandas as pd
        >>> from pgmpy.estimators import TreeSearch
        >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
        ...                       columns=['A', 'B', 'C', 'D', 'E'])
        >>> est = TreeSearch(values, root_node='B')
        >>> model = est.estimate(estimator_type='tan')
        """
        # Step 0: Check for edge weight computation method.
        edge_weights_fn = TreeSearch._resolve_edge_weights_fn(edge_weights_fn)

        # Step 1: Compute edge weights for a fully connected graph.
        n_vars = len(data.columns)
        pbar = combinations(data.columns, 2)
        if show_progress and config.SHOW_PROGRESS:
            pbar = tqdm(pbar, total=(n_vars * (n_vars - 1) / 2), desc="Building tree")

        def _conditional_edge_weights_fn(u, v):
            """
            Computes the conditional edge weight of variable index u and v conditioned on class_node
            """
            # P(class_node = c) for each class state c.
            cond_marginal = data.loc[:, class_node].value_counts() / data.shape[0]
            cond_edge_weight = 0
            # Sum over class states: P(c) * score(u, v | class_node = c).
            for index, marg_prob in cond_marginal.items():
                df_cond_subset = data[data.loc[:, class_node] == index]
                cond_edge_weight += marg_prob * edge_weights_fn(
                    df_cond_subset.loc[:, u], df_cond_subset.loc[:, v]
                )
            return cond_edge_weight

        vals = Parallel(n_jobs=n_jobs)(
            delayed(_conditional_edge_weights_fn)(u, v) for u, v in pbar
        )
        weights = np.zeros((n_vars, n_vars))
        indices = np.triu_indices(n_vars, k=1)
        weights[indices] = vals
        weights.T[indices] = vals

        return weights

    @staticmethod
    def _create_tree_and_dag(weights, columns, root_node):
        """
        Helper function to Chow-Liu algorithm for estimating tree structure from given data. Refer to
        pgmpy.estimators.TreeSearch for more details. This function returns the DAG based on the edge weights matrix.

        Parameters
        ----------
        weights: numpy 2D array, shape = (n_columns, n_columns)
            symmetric matrix where each element represents an edge weight.

        columns: list or array
            Names of the columns (& rows) of the weights matrix.

        root_node: str, int, or any hashable python object.
            The root node of the tree structure.

        Returns
        -------
        model: pgmpy.base.DAG
            The estimated model structure.

        Examples
        --------
        >>> import numpy as np
        >>> import pandas as pd
        >>> from pgmpy.estimators import TreeSearch
        >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
        ...                       columns=['A', 'B', 'C', 'D', 'E'])
        >>> est = TreeSearch(values, root_node='B')
        >>> model = est.estimate(estimator_type='chow-liu')
        """
        # Step 2: Compute the maximum spanning tree using the weights.
        T = nx.maximum_spanning_tree(
            nx.from_pandas_adjacency(
                pd.DataFrame(weights, index=columns, columns=columns),
                create_using=nx.Graph,
            )
        )

        # Step 3: Create DAG by directing edges away from root node and return
        D = nx.bfs_tree(T, root_node)
        return DAG(D)
|
testbed/pgmpy__pgmpy/pgmpy/estimators/__init__.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Public entry point for pgmpy's estimator classes (parameter estimation,
# structure learning, and structure scoring).
# NOTE(review): pgmpy modules import each other, so the import order below may
# be significant for avoiding circular-import errors — confirm before
# reordering or regrouping these lines.
from pgmpy.estimators.base import BaseEstimator, ParameterEstimator, StructureEstimator
from pgmpy.estimators.MLE import MaximumLikelihoodEstimator
from pgmpy.estimators.BayesianEstimator import BayesianEstimator
from pgmpy.estimators.StructureScore import (
    StructureScore,
    K2,
    BDeu,
    BDs,
    BIC,
    BICGauss,
    BICCondGauss,
    AIC,
    AICGauss,
    AICCondGauss,
    LogLikelihoodGauss,
    LogLikelihoodCondGauss,
)
from pgmpy.estimators.ExhaustiveSearch import ExhaustiveSearch
from pgmpy.estimators.HillClimbSearch import HillClimbSearch
from pgmpy.estimators.TreeSearch import TreeSearch
from pgmpy.estimators.SEMEstimator import SEMEstimator, IVEstimator
from pgmpy.estimators.ScoreCache import ScoreCache
from pgmpy.estimators.MmhcEstimator import MmhcEstimator
from pgmpy.estimators.EM import ExpectationMaximization
from pgmpy.estimators.PC import PC
from pgmpy.estimators.base import MarginalEstimator
from pgmpy.estimators.MirrorDescentEstimator import MirrorDescentEstimator
from pgmpy.estimators.expert import ExpertInLoop
from pgmpy.estimators.GES import GES

# Explicit public API of pgmpy.estimators. NOTE(review): some imported names
# (e.g. ExhaustiveSearch is listed but BDs/BDeu are — several score classes
# imported above are not all mirrored here) — confirm whether __all__ is
# intentionally a subset of the imports.
__all__ = [
    "BaseEstimator",
    "ParameterEstimator",
    "MaximumLikelihoodEstimator",
    "BayesianEstimator",
    "StructureEstimator",
    "ExhaustiveSearch",
    "HillClimbSearch",
    "TreeSearch",
    "StructureScore",
    "K2",
    "BDeu",
    "BDs",
    "BIC",
    "BICGauss",
    "AIC",
    "AICGauss",
    "ScoreCache",
    "SEMEstimator",
    "IVEstimator",
    "MmhcEstimator",
    "PC",
    "ExpertInLoop",
    "ExpectationMaximization",
    "MarginalEstimator",
    "MirrorDescentEstimator",
    "GES",
    "LogLikelihoodGauss",
    "LogLikelihoodCondGauss",
    "AICCondGauss",
    "BICCondGauss",
]
|
testbed/pgmpy__pgmpy/pgmpy/estimators/base.py
ADDED
|
@@ -0,0 +1,432 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pandas as pd
|
| 7 |
+
|
| 8 |
+
from pgmpy.factors import FactorDict
|
| 9 |
+
from pgmpy.factors.discrete import DiscreteFactor
|
| 10 |
+
from pgmpy.inference.ExactInference import BeliefPropagation
|
| 11 |
+
from pgmpy.utils import preprocess_data
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class BaseEstimator(object):
|
| 15 |
+
"""
|
| 16 |
+
Base class for estimators in pgmpy; `ParameterEstimator`,
|
| 17 |
+
`StructureEstimator` and `StructureScore` derive from this class.
|
| 18 |
+
|
| 19 |
+
Parameters
|
| 20 |
+
----------
|
| 21 |
+
data: pandas DataFrame object
|
| 22 |
+
object where each column represents one variable.
|
| 23 |
+
(If some values in the data are missing the data cells should be set to `numpy.nan`.
|
| 24 |
+
Note that pandas converts each column containing `numpy.nan`s to dtype `float`.)
|
| 25 |
+
|
| 26 |
+
state_names: dict (optional)
|
| 27 |
+
A dict indicating, for each variable, the discrete set of states (or values)
|
| 28 |
+
that the variable can take. If unspecified, the observed values in the data set
|
| 29 |
+
are taken to be the only possible states.
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
def __init__(self, data=None, state_names=None):
|
| 33 |
+
if data is None:
|
| 34 |
+
self.data = None
|
| 35 |
+
self.dtypes = None
|
| 36 |
+
else:
|
| 37 |
+
self.data, self.dtypes = preprocess_data(data)
|
| 38 |
+
|
| 39 |
+
# data can be None in the case when learning structure from
|
| 40 |
+
# independence conditions. Look into PC.py.
|
| 41 |
+
if self.data is not None:
|
| 42 |
+
self.variables = list(data.columns.values)
|
| 43 |
+
|
| 44 |
+
if not isinstance(state_names, dict):
|
| 45 |
+
self.state_names = {
|
| 46 |
+
var: self._collect_state_names(var) for var in self.variables
|
| 47 |
+
}
|
| 48 |
+
else:
|
| 49 |
+
self.state_names = dict()
|
| 50 |
+
for var in self.variables:
|
| 51 |
+
if var in state_names:
|
| 52 |
+
if not set(self._collect_state_names(var)) <= set(
|
| 53 |
+
state_names[var]
|
| 54 |
+
):
|
| 55 |
+
raise ValueError(
|
| 56 |
+
f"Data contains unexpected states for variable: {var}."
|
| 57 |
+
)
|
| 58 |
+
self.state_names[var] = state_names[var]
|
| 59 |
+
else:
|
| 60 |
+
self.state_names[var] = self._collect_state_names(var)
|
| 61 |
+
|
| 62 |
+
def _collect_state_names(self, variable):
|
| 63 |
+
"Return a list of states that the variable takes in the data."
|
| 64 |
+
states = sorted(list(self.data.loc[:, variable].dropna().unique()))
|
| 65 |
+
return states
|
| 66 |
+
|
| 67 |
+
def state_counts(
|
| 68 |
+
self,
|
| 69 |
+
variable,
|
| 70 |
+
parents=[],
|
| 71 |
+
weighted=False,
|
| 72 |
+
reindex=True,
|
| 73 |
+
):
|
| 74 |
+
"""
|
| 75 |
+
Return counts how often each state of 'variable' occurred in the data.
|
| 76 |
+
If a list of parents is provided, counting is done conditionally
|
| 77 |
+
for each state configuration of the parents.
|
| 78 |
+
|
| 79 |
+
Parameters
|
| 80 |
+
----------
|
| 81 |
+
variable: string
|
| 82 |
+
Name of the variable for which the state count is to be done.
|
| 83 |
+
|
| 84 |
+
parents: list
|
| 85 |
+
Optional list of variable parents, if conditional counting is desired.
|
| 86 |
+
Order of parents in list is reflected in the returned DataFrame
|
| 87 |
+
|
| 88 |
+
weighted: bool
|
| 89 |
+
If True, data must have a `_weight` column specifying the weight of the
|
| 90 |
+
datapoint (row). If False, each datapoint has a weight of `1`.
|
| 91 |
+
|
| 92 |
+
reindex: bool
|
| 93 |
+
If True, returns a data frame with all possible parents state combinations
|
| 94 |
+
as the columns. If False, drops the state combinations which are not
|
| 95 |
+
present in the data.
|
| 96 |
+
|
| 97 |
+
Returns
|
| 98 |
+
-------
|
| 99 |
+
state_counts: pandas.DataFrame
|
| 100 |
+
Table with state counts for 'variable'
|
| 101 |
+
|
| 102 |
+
Examples
|
| 103 |
+
--------
|
| 104 |
+
>>> import pandas as pd
|
| 105 |
+
>>> from pgmpy.estimators import BaseEstimator
|
| 106 |
+
>>> data = pd.DataFrame(data={'A': ['a1', 'a1', 'a2'],
|
| 107 |
+
'B': ['b1', 'b2', 'b1'],
|
| 108 |
+
'C': ['c1', 'c1', 'c2']})
|
| 109 |
+
>>> estimator = BaseEstimator(data)
|
| 110 |
+
>>> estimator.state_counts('A')
|
| 111 |
+
A
|
| 112 |
+
a1 2
|
| 113 |
+
a2 1
|
| 114 |
+
>>> estimator.state_counts('C', parents=['A', 'B'])
|
| 115 |
+
A a1 a2
|
| 116 |
+
B b1 b2 b1 b2
|
| 117 |
+
C
|
| 118 |
+
c1 1 1 0 0
|
| 119 |
+
c2 0 0 1 0
|
| 120 |
+
>>> estimator.state_counts('C', parents=['A'])
|
| 121 |
+
A a1 a2
|
| 122 |
+
C
|
| 123 |
+
c1 2.0 0.0
|
| 124 |
+
c2 0.0 1.0
|
| 125 |
+
"""
|
| 126 |
+
parents = list(parents)
|
| 127 |
+
|
| 128 |
+
if weighted and ("_weight" not in self.data.columns):
|
| 129 |
+
raise ValueError("data must contain a `_weight` column if weighted=True")
|
| 130 |
+
|
| 131 |
+
if not parents:
|
| 132 |
+
# count how often each state of 'variable' occurred
|
| 133 |
+
if weighted:
|
| 134 |
+
state_count_data = self.data.groupby([variable], observed=True)[
|
| 135 |
+
"_weight"
|
| 136 |
+
].sum()
|
| 137 |
+
else:
|
| 138 |
+
state_count_data = self.data.loc[:, variable].value_counts()
|
| 139 |
+
|
| 140 |
+
state_counts = (
|
| 141 |
+
state_count_data.reindex(self.state_names[variable])
|
| 142 |
+
.fillna(0)
|
| 143 |
+
.to_frame()
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
else:
|
| 147 |
+
parents_states = [self.state_names[parent] for parent in parents]
|
| 148 |
+
# count how often each state of 'variable' occurred, conditional on parents' states
|
| 149 |
+
if weighted:
|
| 150 |
+
state_count_data = (
|
| 151 |
+
self.data.groupby([variable] + parents, observed=True)["_weight"]
|
| 152 |
+
.sum()
|
| 153 |
+
.unstack(parents)
|
| 154 |
+
)
|
| 155 |
+
|
| 156 |
+
else:
|
| 157 |
+
state_count_data = (
|
| 158 |
+
self.data.groupby([variable] + parents, observed=True)
|
| 159 |
+
.size()
|
| 160 |
+
.unstack(parents)
|
| 161 |
+
)
|
| 162 |
+
|
| 163 |
+
if not isinstance(state_count_data.columns, pd.MultiIndex):
|
| 164 |
+
state_count_data.columns = pd.MultiIndex.from_arrays(
|
| 165 |
+
[state_count_data.columns]
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
if reindex:
|
| 169 |
+
# reindex rows & columns to sort them and to add missing ones
|
| 170 |
+
# missing row = some state of 'variable' did not occur in data
|
| 171 |
+
# missing column = some state configuration of current 'variable's parents
|
| 172 |
+
# did not occur in data
|
| 173 |
+
row_index = self.state_names[variable]
|
| 174 |
+
column_index = pd.MultiIndex.from_product(parents_states, names=parents)
|
| 175 |
+
state_counts = state_count_data.reindex(
|
| 176 |
+
index=row_index, columns=column_index
|
| 177 |
+
).fillna(0)
|
| 178 |
+
else:
|
| 179 |
+
state_counts = state_count_data.fillna(0)
|
| 180 |
+
|
| 181 |
+
return state_counts
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
class ParameterEstimator(BaseEstimator):
|
| 185 |
+
"""
|
| 186 |
+
Base class for parameter estimators in pgmpy.
|
| 187 |
+
|
| 188 |
+
Parameters
|
| 189 |
+
----------
|
| 190 |
+
model: pgmpy.models.BayesianNetwork or pgmpy.models.MarkovNetwork model
|
| 191 |
+
for which parameter estimation is to be done.
|
| 192 |
+
|
| 193 |
+
data: pandas DataFrame object
|
| 194 |
+
dataframe object with column names identical to the variable names of the model.
|
| 195 |
+
(If some values in the data are missing the data cells should be set to `numpy.nan`.
|
| 196 |
+
Note that pandas converts each column containing `numpy.nan`s to dtype `float`.)
|
| 197 |
+
|
| 198 |
+
state_names: dict (optional)
|
| 199 |
+
A dict indicating, for each variable, the discrete set of states (or values)
|
| 200 |
+
that the variable can take. If unspecified, the observed values in the data set
|
| 201 |
+
are taken to be the only possible states.
|
| 202 |
+
"""
|
| 203 |
+
|
| 204 |
+
def __init__(self, model, data, **kwargs):
|
| 205 |
+
"""
|
| 206 |
+
Base class for parameter estimators in pgmpy.
|
| 207 |
+
|
| 208 |
+
Parameters
|
| 209 |
+
----------
|
| 210 |
+
model: pgmpy.models.BayesianNetwork or pgmpy.models.MarkovNetwork model
|
| 211 |
+
for which parameter estimation is to be done.
|
| 212 |
+
|
| 213 |
+
data: pandas DataFrame object
|
| 214 |
+
dataframe object with column names identical to the variable names of the model.
|
| 215 |
+
(If some values in the data are missing the data cells should be set to `numpy.nan`.
|
| 216 |
+
Note that pandas converts each column containing `numpy.nan`s to dtype `float`.)
|
| 217 |
+
|
| 218 |
+
state_names: dict (optional)
|
| 219 |
+
A dict indicating, for each variable, the discrete set of states (or values)
|
| 220 |
+
that the variable can take. If unspecified, the observed values in the data set
|
| 221 |
+
are taken to be the only possible states.
|
| 222 |
+
|
| 223 |
+
complete_samples_only: bool (optional, default `True`)
|
| 224 |
+
Specifies how to deal with missing data, if present. If set to `True` all rows
|
| 225 |
+
that contain `np.Nan` somewhere are ignored. If `False` then, for each variable,
|
| 226 |
+
every row where neither the variable nor its parents are `np.nan` is used.
|
| 227 |
+
This sets the behavior of the `state_count`-method.
|
| 228 |
+
"""
|
| 229 |
+
self.model = model
|
| 230 |
+
|
| 231 |
+
super(ParameterEstimator, self).__init__(data, **kwargs)
|
| 232 |
+
|
| 233 |
+
def state_counts(self, variable, weighted=False, **kwargs):
|
| 234 |
+
"""
|
| 235 |
+
Return counts how often each state of 'variable' occurred in the data.
|
| 236 |
+
If the variable has parents, counting is done conditionally
|
| 237 |
+
for each state configuration of the parents.
|
| 238 |
+
|
| 239 |
+
Parameters
|
| 240 |
+
----------
|
| 241 |
+
variable: string
|
| 242 |
+
Name of the variable for which the state count is to be done.
|
| 243 |
+
|
| 244 |
+
Returns
|
| 245 |
+
-------
|
| 246 |
+
state_counts: pandas.DataFrame
|
| 247 |
+
Table with state counts for 'variable'
|
| 248 |
+
|
| 249 |
+
Examples
|
| 250 |
+
--------
|
| 251 |
+
>>> import pandas as pd
|
| 252 |
+
>>> from pgmpy.models import BayesianNetwork
|
| 253 |
+
>>> from pgmpy.estimators import ParameterEstimator
|
| 254 |
+
>>> model = BayesianNetwork([('A', 'C'), ('B', 'C')])
|
| 255 |
+
>>> data = pd.DataFrame(data={'A': ['a1', 'a1', 'a2'],
|
| 256 |
+
'B': ['b1', 'b2', 'b1'],
|
| 257 |
+
'C': ['c1', 'c1', 'c2']})
|
| 258 |
+
>>> estimator = ParameterEstimator(model, data)
|
| 259 |
+
>>> estimator.state_counts('A')
|
| 260 |
+
A
|
| 261 |
+
a1 2
|
| 262 |
+
a2 1
|
| 263 |
+
>>> estimator.state_counts('C')
|
| 264 |
+
A a1 a2
|
| 265 |
+
B b1 b2 b1 b2
|
| 266 |
+
C
|
| 267 |
+
c1 1 1 0 0
|
| 268 |
+
c2 0 0 1 0
|
| 269 |
+
"""
|
| 270 |
+
|
| 271 |
+
parents = sorted(self.model.get_parents(variable))
|
| 272 |
+
return super(ParameterEstimator, self).state_counts(
|
| 273 |
+
variable, parents=parents, weighted=weighted, **kwargs
|
| 274 |
+
)
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
class StructureEstimator(BaseEstimator):
|
| 278 |
+
"""
|
| 279 |
+
Base class for structure estimators in pgmpy.
|
| 280 |
+
|
| 281 |
+
Parameters
|
| 282 |
+
----------
|
| 283 |
+
data: pandas DataFrame object
|
| 284 |
+
dataframe object where each column represents one variable.
|
| 285 |
+
(If some values in the data are missing the data cells should be set to `numpy.nan`.
|
| 286 |
+
Note that pandas converts each column containing `numpy.nan`s to dtype `float`.)
|
| 287 |
+
|
| 288 |
+
state_names: dict (optional)
|
| 289 |
+
A dict indicating, for each variable, the discrete set of states (or values)
|
| 290 |
+
that the variable can take. If unspecified, the observed values in the data set
|
| 291 |
+
are taken to be the only possible states.
|
| 292 |
+
"""
|
| 293 |
+
|
| 294 |
+
def __init__(self, data=None, independencies=None, **kwargs):
|
| 295 |
+
self.independencies = independencies
|
| 296 |
+
if self.independencies is not None:
|
| 297 |
+
self.variables = self.independencies.get_all_variables()
|
| 298 |
+
|
| 299 |
+
super(StructureEstimator, self).__init__(data=data, **kwargs)
|
| 300 |
+
|
| 301 |
+
def estimate(self):
|
| 302 |
+
pass
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
class MarginalEstimator(BaseEstimator):
|
| 306 |
+
"""
|
| 307 |
+
Base class for marginal estimators in pgmpy.
|
| 308 |
+
|
| 309 |
+
Parameters
|
| 310 |
+
----------
|
| 311 |
+
model: MarkovNetwork | FactorGraph | JunctionTree
|
| 312 |
+
A model to optimize, using Belief Propagation and an estimation method.
|
| 313 |
+
|
| 314 |
+
data: pandas DataFrame object
|
| 315 |
+
dataframe object where each column represents one variable.
|
| 316 |
+
(If some values in the data are missing the data cells should be set to `numpy.nan`.
|
| 317 |
+
Note that pandas converts each column containing `numpy.nan`s to dtype `float`.)
|
| 318 |
+
|
| 319 |
+
state_names: dict (optional)
|
| 320 |
+
A dict indicating, for each variable, the discrete set of states (or values)
|
| 321 |
+
that the variable can take. If unspecified, the observed values in the data set
|
| 322 |
+
are taken to be the only possible states.
|
| 323 |
+
"""
|
| 324 |
+
|
| 325 |
+
def __init__(self, model, data, **kwargs):
|
| 326 |
+
super().__init__(data, **kwargs)
|
| 327 |
+
self.belief_propagation = BeliefPropagation(model=model)
|
| 328 |
+
self.theta = None
|
| 329 |
+
|
| 330 |
+
@staticmethod
|
| 331 |
+
def _clique_to_marginal(marginals, clique_nodes):
|
| 332 |
+
"""
|
| 333 |
+
Construct a minimal mapping from cliques to marginals.
|
| 334 |
+
|
| 335 |
+
Parameters
|
| 336 |
+
----------
|
| 337 |
+
marginals: FactorDict
|
| 338 |
+
A mapping from cliques to factors.
|
| 339 |
+
|
| 340 |
+
clique_nodes: List[Tuple[str, ...]]
|
| 341 |
+
Cliques that exist within a different FactorDict.
|
| 342 |
+
|
| 343 |
+
Returns
|
| 344 |
+
-------
|
| 345 |
+
clique_to_marginal: A mapping from clique to a list of marginals
|
| 346 |
+
such that each clique is a super set of the marginals it is associated with.
|
| 347 |
+
"""
|
| 348 |
+
clique_to_marginal = defaultdict(lambda: [])
|
| 349 |
+
for marginal_clique, marginal in marginals.items():
|
| 350 |
+
for clique in clique_nodes:
|
| 351 |
+
if set(marginal_clique) <= set(clique):
|
| 352 |
+
clique_to_marginal[clique].append(marginal)
|
| 353 |
+
break
|
| 354 |
+
else:
|
| 355 |
+
raise ValueError(
|
| 356 |
+
"Could not find a corresponding clique for"
|
| 357 |
+
+ f" marginal: {marginal_clique}"
|
| 358 |
+
+ f" out of cliques: {clique_nodes}"
|
| 359 |
+
)
|
| 360 |
+
return clique_to_marginal
|
| 361 |
+
|
| 362 |
+
def _marginal_loss(self, marginals, clique_to_marginal, metric):
|
| 363 |
+
"""
|
| 364 |
+
Compute the loss and gradient for a given dictionary of clique beliefs.
|
| 365 |
+
|
| 366 |
+
Parameters
|
| 367 |
+
----------
|
| 368 |
+
marginals: FactorDict
|
| 369 |
+
A mapping from a clique to an observed marginal represented by a `DiscreteFactor`.
|
| 370 |
+
|
| 371 |
+
clique_to_marginal: Dict[Tuple[str, ...], List[DiscreteFactor]]
|
| 372 |
+
A mapping from a Junction Tree's clique to a list of corresponding marginals
|
| 373 |
+
such that a clique is a superset of the marginal with the constraint that
|
| 374 |
+
each marginal only appears once across all cliques.
|
| 375 |
+
|
| 376 |
+
metric: str
|
| 377 |
+
One of either 'L1' or 'L2'.
|
| 378 |
+
|
| 379 |
+
Returns
|
| 380 |
+
-------
|
| 381 |
+
Loss and gradient of the loss: Tuple[float, pgmpy.factors.FactorDict.FactorDict]
|
| 382 |
+
Marginal loss and the gradients of the loss with respect to the estimated beliefs.
|
| 383 |
+
"""
|
| 384 |
+
loss = 0.0
|
| 385 |
+
gradient = FactorDict({})
|
| 386 |
+
|
| 387 |
+
for clique, mu in marginals.items():
|
| 388 |
+
# Initialize a gradient for this clique as zero.
|
| 389 |
+
gradient[clique] = mu.identity_factor() * 0
|
| 390 |
+
|
| 391 |
+
# Iterate over all marginals involving this clique.
|
| 392 |
+
for y in clique_to_marginal[clique]:
|
| 393 |
+
# Step 1: Marginalize the clique to the size of `y`.
|
| 394 |
+
projection_variables = list(set(mu.scope()) - set(y.scope()))
|
| 395 |
+
mu2 = mu.marginalize(
|
| 396 |
+
variables=projection_variables,
|
| 397 |
+
inplace=False,
|
| 398 |
+
)
|
| 399 |
+
|
| 400 |
+
if not isinstance(mu2, DiscreteFactor):
|
| 401 |
+
raise TypeError(f"Expecting a DiscreteFactor but found {type(mu2)}")
|
| 402 |
+
|
| 403 |
+
# Step 2: Compute the difference between the `mu2` and `y`.
|
| 404 |
+
diff_factor = mu2 + (y * -1)
|
| 405 |
+
|
| 406 |
+
if not diff_factor:
|
| 407 |
+
raise ValueError("An error occured when calculating the gradient.")
|
| 408 |
+
|
| 409 |
+
diff = diff_factor.values.flatten()
|
| 410 |
+
|
| 411 |
+
# Step 3: Compute the loss and gradient based upon the metric.
|
| 412 |
+
if metric == "L1":
|
| 413 |
+
loss += abs(diff).sum()
|
| 414 |
+
grad = diff.sign() if hasattr(diff, "sign") else np.sign(diff)
|
| 415 |
+
elif metric == "L2":
|
| 416 |
+
loss += 0.5 * (diff @ diff)
|
| 417 |
+
grad = diff
|
| 418 |
+
else:
|
| 419 |
+
raise ValueError("Metric must be one of L1 or L2.")
|
| 420 |
+
|
| 421 |
+
# Step 4: Update the gradient from this marginal.
|
| 422 |
+
gradient[clique] += DiscreteFactor(
|
| 423 |
+
variables=mu2.scope(),
|
| 424 |
+
cardinality=mu2.cardinality,
|
| 425 |
+
values=grad,
|
| 426 |
+
state_names=mu2.state_names,
|
| 427 |
+
)
|
| 428 |
+
|
| 429 |
+
return loss, gradient
|
| 430 |
+
|
| 431 |
+
def estimate(self):
|
| 432 |
+
pass
|
testbed/pgmpy__pgmpy/pgmpy/estimators/expert.py
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
from itertools import combinations
|
| 3 |
+
|
| 4 |
+
import networkx as nx
|
| 5 |
+
import pandas as pd
|
| 6 |
+
|
| 7 |
+
from pgmpy import config
|
| 8 |
+
from pgmpy.base import DAG
|
| 9 |
+
from pgmpy.estimators import StructureEstimator
|
| 10 |
+
from pgmpy.estimators.CITests import pillai_trace
|
| 11 |
+
from pgmpy.utils import llm_pairwise_orient, manual_pairwise_orient
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class ExpertInLoop(StructureEstimator):
|
| 15 |
+
def __init__(self, data=None, **kwargs):
|
| 16 |
+
super(ExpertInLoop, self).__init__(data=data, **kwargs)
|
| 17 |
+
|
| 18 |
+
def test_all(self, dag):
|
| 19 |
+
"""
|
| 20 |
+
Runs CI tests on all possible combinations of variables in `dag`.
|
| 21 |
+
|
| 22 |
+
Parameters
|
| 23 |
+
----------
|
| 24 |
+
dag: pgmpy.base.DAG
|
| 25 |
+
The DAG on which to run the tests.
|
| 26 |
+
|
| 27 |
+
Returns
|
| 28 |
+
-------
|
| 29 |
+
pd.DataFrame: The results with p-values and effect sizes of all the tests.
|
| 30 |
+
"""
|
| 31 |
+
cis = []
|
| 32 |
+
for u, v in combinations(list(dag.nodes()), 2):
|
| 33 |
+
u_parents = set(dag.get_parents(u))
|
| 34 |
+
v_parents = set(dag.get_parents(v))
|
| 35 |
+
|
| 36 |
+
if v in u_parents:
|
| 37 |
+
u_parents -= set([v])
|
| 38 |
+
edge_present = True
|
| 39 |
+
elif u in v_parents:
|
| 40 |
+
v_parents -= set([u])
|
| 41 |
+
edge_present = True
|
| 42 |
+
else:
|
| 43 |
+
edge_present = False
|
| 44 |
+
|
| 45 |
+
cond_set = list(set(u_parents).union(v_parents))
|
| 46 |
+
effect, p_value = pillai_trace(
|
| 47 |
+
X=u, Y=v, Z=cond_set, data=self.data, boolean=False
|
| 48 |
+
)
|
| 49 |
+
cis.append([u, v, cond_set, edge_present, effect, p_value])
|
| 50 |
+
|
| 51 |
+
return pd.DataFrame(
|
| 52 |
+
cis, columns=["u", "v", "z", "edge_present", "effect", "p_val"]
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
def estimate(
|
| 56 |
+
self,
|
| 57 |
+
pval_threshold=0.05,
|
| 58 |
+
effect_size_threshold=0.05,
|
| 59 |
+
use_llm=True,
|
| 60 |
+
llm_model="gemini/gemini-1.5-flash",
|
| 61 |
+
variable_descriptions=None,
|
| 62 |
+
show_progress=True,
|
| 63 |
+
**kwargs,
|
| 64 |
+
):
|
| 65 |
+
"""
|
| 66 |
+
Estimates a DAG from the data by utilizing expert knowledge.
|
| 67 |
+
|
| 68 |
+
The method iteratively adds and removes edges between variables
|
| 69 |
+
(similar to Greedy Equivalence Search algorithm) based on a score
|
| 70 |
+
metric that improves the model's fit to the data the most. The score
|
| 71 |
+
metric used is based on conditional independence testing. When adding
|
| 72 |
+
an edge to the model, the method asks for expert knowledge to decide
|
| 73 |
+
the orientation of the edge. Alternatively, an LLM can used to decide
|
| 74 |
+
the orientation of the edge.
|
| 75 |
+
|
| 76 |
+
Parameters
|
| 77 |
+
----------
|
| 78 |
+
pval_threshold: float
|
| 79 |
+
The p-value threshold to use for the test to determine whether
|
| 80 |
+
there is a significant association between the variables or not.
|
| 81 |
+
|
| 82 |
+
effect_size_threshold: float
|
| 83 |
+
The effect size threshold to use to suggest a new edge. If the
|
| 84 |
+
conditional effect size between two variables is greater than the
|
| 85 |
+
threshold, the algorithm would suggest to add an edge between them.
|
| 86 |
+
And if the effect size for an edge is less than the threshold,
|
| 87 |
+
would suggest to remove the edge.
|
| 88 |
+
|
| 89 |
+
use_llm: bool
|
| 90 |
+
Whether to use a Large Language Model for edge orientation. If
|
| 91 |
+
False, prompts the user to specify the direction between the edges.
|
| 92 |
+
|
| 93 |
+
llm_model: str (default: gemini/gemini-1.5-flash)
|
| 94 |
+
The LLM model to use. Please refer to litellm documentation (https://docs.litellm.ai/docs/providers)
|
| 95 |
+
for available model options. Default is gemini-1.5-flash
|
| 96 |
+
|
| 97 |
+
variable_descriptions: dict
|
| 98 |
+
A dict of the form {var: description} giving a text description of
|
| 99 |
+
each variable in the model.
|
| 100 |
+
|
| 101 |
+
show_progress: bool (default: True)
|
| 102 |
+
If True, prints info of the running status.
|
| 103 |
+
|
| 104 |
+
kwargs: kwargs
|
| 105 |
+
Any additional parameters to pass to litellm.completion method.
|
| 106 |
+
Please refer documentation at: https://docs.litellm.ai/docs/completion/input#input-params-1
|
| 107 |
+
|
| 108 |
+
Returns
|
| 109 |
+
-------
|
| 110 |
+
pgmpy.base.DAG: A DAG representing the learned causal structure.
|
| 111 |
+
|
| 112 |
+
Examples
|
| 113 |
+
--------
|
| 114 |
+
>>> from pgmpy.utils import get_example_model
|
| 115 |
+
>>> from pgmpy.estimators import ExpertInLoop
|
| 116 |
+
>>> model = get_example_model('cancer')
|
| 117 |
+
>>> df = model.simulate(int(1e3))
|
| 118 |
+
>>> variable_descriptions = {
|
| 119 |
+
... "Smoker": "A binary variable representing whether a person smokes or not.",
|
| 120 |
+
... "Cancer": "A binary variable representing whether a person has cancer. ",
|
| 121 |
+
... "Xray": "A binary variable representing the result of an X-ray test.",
|
| 122 |
+
... "Pollution": "A binary variable representing whether the person is in a high-pollution area or not."
|
| 123 |
+
... "Dyspnoea": "A binary variable representing whether a person has shortness of breath. "}
|
| 124 |
+
>>> dag = ExpertInLoop(df).estimate(
|
| 125 |
+
... effect_size_threshold=0.0001,
|
| 126 |
+
... use_llm=True,
|
| 127 |
+
... variable_descriptions=variable_descriptions)
|
| 128 |
+
>>> dag.edges()
|
| 129 |
+
OutEdgeView([('Smoker', 'Cancer'), ('Cancer', 'Xray'), ('Cancer', 'Dyspnoea'), ('Pollution', 'Cancer')])
|
| 130 |
+
"""
|
| 131 |
+
# Step 0: Create a new DAG on all the variables with no edge.
|
| 132 |
+
nodes = list(self.data.columns)
|
| 133 |
+
dag = DAG()
|
| 134 |
+
dag.add_nodes_from(nodes)
|
| 135 |
+
|
| 136 |
+
blacklisted_edges = []
|
| 137 |
+
while True:
|
| 138 |
+
# Step 1: Compute effects and p-values between every combination of variables.
|
| 139 |
+
all_effects = self.test_all(dag)
|
| 140 |
+
|
| 141 |
+
# Step 2: Remove any edges between variables that are not sufficiently associated.
|
| 142 |
+
edge_effects = all_effects[all_effects.edge_present == True]
|
| 143 |
+
edge_effects = edge_effects[
|
| 144 |
+
(edge_effects.effect < effect_size_threshold)
|
| 145 |
+
& (edge_effects.p_val > pval_threshold)
|
| 146 |
+
]
|
| 147 |
+
remove_edges = list(edge_effects.loc[:, ("u", "v")].to_records(index=False))
|
| 148 |
+
for edge in remove_edges:
|
| 149 |
+
dag.remove_edge(edge[0], edge[1])
|
| 150 |
+
|
| 151 |
+
# Step 3: Add edge between variables which have significant association.
|
| 152 |
+
nonedge_effects = all_effects[all_effects.edge_present == False]
|
| 153 |
+
nonedge_effects = nonedge_effects[
|
| 154 |
+
(nonedge_effects.effect >= effect_size_threshold)
|
| 155 |
+
& (nonedge_effects.p_val <= pval_threshold)
|
| 156 |
+
]
|
| 157 |
+
|
| 158 |
+
# Step 3.2: Else determine the edge direction and add it if not in blacklisted_edges.
|
| 159 |
+
if len(blacklisted_edges) > 0:
|
| 160 |
+
blacklisted_edges_us = [edge[0] for edge in blacklisted_edges]
|
| 161 |
+
blacklisted_edges_vs = [edge[1] for edge in blacklisted_edges]
|
| 162 |
+
nonedge_effects = nonedge_effects.loc[
|
| 163 |
+
~(
|
| 164 |
+
(
|
| 165 |
+
nonedge_effects.u.isin(blacklisted_edges_us)
|
| 166 |
+
& nonedge_effects.v.isin(blacklisted_edges_vs)
|
| 167 |
+
)
|
| 168 |
+
| (
|
| 169 |
+
nonedge_effects.u.isin(blacklisted_edges_vs)
|
| 170 |
+
& nonedge_effects.v.isin(blacklisted_edges_us)
|
| 171 |
+
)
|
| 172 |
+
),
|
| 173 |
+
:,
|
| 174 |
+
]
|
| 175 |
+
|
| 176 |
+
# Step 3.1: Exit loop if all correlations in data are explained by the model.
|
| 177 |
+
if (edge_effects.shape[0] == 0) and (nonedge_effects.shape[0] == 0):
|
| 178 |
+
break
|
| 179 |
+
|
| 180 |
+
selected_edge = nonedge_effects.iloc[nonedge_effects.effect.argmax()]
|
| 181 |
+
if use_llm:
|
| 182 |
+
edge_direction = llm_pairwise_orient(
|
| 183 |
+
selected_edge.u,
|
| 184 |
+
selected_edge.v,
|
| 185 |
+
variable_descriptions,
|
| 186 |
+
llm_model=llm_model,
|
| 187 |
+
**kwargs,
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
if config.SHOW_PROGRESS and show_progress:
|
| 191 |
+
sys.stdout.write(
|
| 192 |
+
f"\rQueried for edge orientation between {selected_edge.u} and {selected_edge.v}. Got: {edge_direction[0]} -> {edge_direction[1]}"
|
| 193 |
+
)
|
| 194 |
+
sys.stdout.flush()
|
| 195 |
+
|
| 196 |
+
else:
|
| 197 |
+
edge_direction = manual_pairwise_orient(
|
| 198 |
+
selected_edge.u, selected_edge.v
|
| 199 |
+
)
|
| 200 |
+
|
| 201 |
+
# Step 3.3: Blacklist the edge if it creates a cycle, else add it to the DAG.
|
| 202 |
+
if nx.has_path(dag, edge_direction[1], edge_direction[0]):
|
| 203 |
+
blacklisted_edges.append(edge_direction)
|
| 204 |
+
else:
|
| 205 |
+
dag.add_edges_from([edge_direction])
|
| 206 |
+
|
| 207 |
+
return dag
|
testbed/pgmpy__pgmpy/pgmpy/extern/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .tabulate import tabulate
|
| 2 |
+
|
| 3 |
+
__all__ = ["tabulate"]
|
testbed/pgmpy__pgmpy/pgmpy/extern/tabulate.py
ADDED
|
@@ -0,0 +1,970 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
# LICENSE INFORMATION
|
| 4 |
+
#
|
| 5 |
+
# Copyright (c) 2011-2013 Sergey Astanin
|
| 6 |
+
#
|
| 7 |
+
# Permission is hereby granted, free of charge, to any person obtaining
|
| 8 |
+
# a copy of this software and associated documentation files (the
|
| 9 |
+
# "Software"), to deal in the Software without restriction, including
|
| 10 |
+
# without limitation the rights to use, copy, modify, merge, publish,
|
| 11 |
+
# distribute, sublicense, and/or sell copies of the Software, and to
|
| 12 |
+
# permit persons to whom the Software is furnished to do so, subject to
|
| 13 |
+
# the following conditions:
|
| 14 |
+
#
|
| 15 |
+
# The above copyright notice and this permission notice shall be
|
| 16 |
+
# included in all copies or substantial portions of the Software.
|
| 17 |
+
#
|
| 18 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
| 19 |
+
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
| 20 |
+
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
| 21 |
+
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
| 22 |
+
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
| 23 |
+
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
| 24 |
+
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
| 25 |
+
|
| 26 |
+
"""Pretty-print tabular data."""
|
| 27 |
+
|
| 28 |
+
from __future__ import print_function
|
| 29 |
+
from __future__ import unicode_literals
|
| 30 |
+
from collections import namedtuple
|
| 31 |
+
from platform import python_version_tuple
|
| 32 |
+
import re
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
if python_version_tuple()[0] < "3":
|
| 36 |
+
from itertools import izip_longest
|
| 37 |
+
from functools import partial
|
| 38 |
+
|
| 39 |
+
_none_type = type(None)
|
| 40 |
+
_int_type = int
|
| 41 |
+
_float_type = float
|
| 42 |
+
_text_type = unicode
|
| 43 |
+
_binary_type = str
|
| 44 |
+
else:
|
| 45 |
+
from itertools import zip_longest as izip_longest
|
| 46 |
+
from functools import reduce, partial
|
| 47 |
+
|
| 48 |
+
_none_type = type(None)
|
| 49 |
+
_int_type = int
|
| 50 |
+
_float_type = float
|
| 51 |
+
_text_type = str
|
| 52 |
+
_binary_type = bytes
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
|
| 56 |
+
__version__ = "0.7.3"
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# A table structure is supposed to be:
|
| 66 |
+
#
|
| 67 |
+
# --- lineabove ---------
|
| 68 |
+
# headerrow
|
| 69 |
+
# --- linebelowheader ---
|
| 70 |
+
# datarow
|
| 71 |
+
# --- linebewteenrows ---
|
| 72 |
+
# ... (more datarows) ...
|
| 73 |
+
# --- linebewteenrows ---
|
| 74 |
+
# last datarow
|
| 75 |
+
# --- linebelow ---------
|
| 76 |
+
#
|
| 77 |
+
# TableFormat's line* elements can be
|
| 78 |
+
#
|
| 79 |
+
# - either None, if the element is not used,
|
| 80 |
+
# - or a Line tuple,
|
| 81 |
+
# - or a function: [col_widths], [col_alignments] -> string.
|
| 82 |
+
#
|
| 83 |
+
# TableFormat's *row elements can be
|
| 84 |
+
#
|
| 85 |
+
# - either None, if the element is not used,
|
| 86 |
+
# - or a DataRow tuple,
|
| 87 |
+
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
|
| 88 |
+
#
|
| 89 |
+
# padding (an integer) is the amount of white space around data values.
|
| 90 |
+
#
|
| 91 |
+
# with_header_hide:
|
| 92 |
+
#
|
| 93 |
+
# - either None, to display all table elements unconditionally,
|
| 94 |
+
# - or a list of elements not to be displayed if the table has column headers.
|
| 95 |
+
#
|
| 96 |
+
TableFormat = namedtuple(
|
| 97 |
+
"TableFormat",
|
| 98 |
+
[
|
| 99 |
+
"lineabove",
|
| 100 |
+
"linebelowheader",
|
| 101 |
+
"linebetweenrows",
|
| 102 |
+
"linebelow",
|
| 103 |
+
"headerrow",
|
| 104 |
+
"datarow",
|
| 105 |
+
"padding",
|
| 106 |
+
"with_header_hide",
|
| 107 |
+
],
|
| 108 |
+
)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def _pipe_segment_with_colons(align, colwidth):
|
| 112 |
+
"""Return a segment of a horizontal line with optional colons which
|
| 113 |
+
indicate column's alignment (as in `pipe` output format)."""
|
| 114 |
+
w = colwidth
|
| 115 |
+
if align in ["right", "decimal"]:
|
| 116 |
+
return ("-" * (w - 1)) + ":"
|
| 117 |
+
elif align == "center":
|
| 118 |
+
return ":" + ("-" * (w - 2)) + ":"
|
| 119 |
+
elif align == "left":
|
| 120 |
+
return ":" + ("-" * (w - 1))
|
| 121 |
+
else:
|
| 122 |
+
return "-" * w
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def _pipe_line_with_colons(colwidths, colaligns):
    """Build a full horizontal rule for the `pipe` format, with colons
    indicating each column's alignment."""
    parts = (
        _pipe_segment_with_colons(align, width)
        for align, width in zip(colaligns, colwidths)
    )
    return "|" + "|".join(parts) + "|"
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
|
| 133 |
+
alignment = {
|
| 134 |
+
"left": "",
|
| 135 |
+
"right": 'align="right"| ',
|
| 136 |
+
"center": 'align="center"| ',
|
| 137 |
+
"decimal": 'align="right"| ',
|
| 138 |
+
}
|
| 139 |
+
# hard-coded padding _around_ align attribute and value together
|
| 140 |
+
# rather than padding parameter which affects only the value
|
| 141 |
+
values_with_attrs = [
|
| 142 |
+
" " + alignment.get(a, "") + c + " " for c, a in zip(cell_values, colaligns)
|
| 143 |
+
]
|
| 144 |
+
colsep = separator * 2
|
| 145 |
+
return (separator + colsep.join(values_with_attrs)).rstrip()
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def _latex_line_begin_tabular(colwidths, colaligns):
|
| 149 |
+
alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
|
| 150 |
+
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
|
| 151 |
+
return "\\begin{tabular}{" + tabular_columns_fmt + "}\n\hline"
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
_table_formats = {
|
| 155 |
+
"simple": TableFormat(
|
| 156 |
+
lineabove=Line("", "-", " ", ""),
|
| 157 |
+
linebelowheader=Line("", "-", " ", ""),
|
| 158 |
+
linebetweenrows=None,
|
| 159 |
+
linebelow=Line("", "-", " ", ""),
|
| 160 |
+
headerrow=DataRow("", " ", ""),
|
| 161 |
+
datarow=DataRow("", " ", ""),
|
| 162 |
+
padding=0,
|
| 163 |
+
with_header_hide=["lineabove", "linebelow"],
|
| 164 |
+
),
|
| 165 |
+
"plain": TableFormat(
|
| 166 |
+
lineabove=None,
|
| 167 |
+
linebelowheader=None,
|
| 168 |
+
linebetweenrows=None,
|
| 169 |
+
linebelow=None,
|
| 170 |
+
headerrow=DataRow("", " ", ""),
|
| 171 |
+
datarow=DataRow("", " ", ""),
|
| 172 |
+
padding=0,
|
| 173 |
+
with_header_hide=None,
|
| 174 |
+
),
|
| 175 |
+
"grid": TableFormat(
|
| 176 |
+
lineabove=Line("+", "-", "+", "+"),
|
| 177 |
+
linebelowheader=Line("+", "=", "+", "+"),
|
| 178 |
+
linebetweenrows=Line("+", "-", "+", "+"),
|
| 179 |
+
linebelow=Line("+", "-", "+", "+"),
|
| 180 |
+
headerrow=DataRow("|", "|", "|"),
|
| 181 |
+
datarow=DataRow("|", "|", "|"),
|
| 182 |
+
padding=1,
|
| 183 |
+
with_header_hide=None,
|
| 184 |
+
),
|
| 185 |
+
"pipe": TableFormat(
|
| 186 |
+
lineabove=_pipe_line_with_colons,
|
| 187 |
+
linebelowheader=_pipe_line_with_colons,
|
| 188 |
+
linebetweenrows=None,
|
| 189 |
+
linebelow=None,
|
| 190 |
+
headerrow=DataRow("|", "|", "|"),
|
| 191 |
+
datarow=DataRow("|", "|", "|"),
|
| 192 |
+
padding=1,
|
| 193 |
+
with_header_hide=["lineabove"],
|
| 194 |
+
),
|
| 195 |
+
"orgtbl": TableFormat(
|
| 196 |
+
lineabove=None,
|
| 197 |
+
linebelowheader=Line("|", "-", "+", "|"),
|
| 198 |
+
linebetweenrows=None,
|
| 199 |
+
linebelow=None,
|
| 200 |
+
headerrow=DataRow("|", "|", "|"),
|
| 201 |
+
datarow=DataRow("|", "|", "|"),
|
| 202 |
+
padding=1,
|
| 203 |
+
with_header_hide=None,
|
| 204 |
+
),
|
| 205 |
+
"rst": TableFormat(
|
| 206 |
+
lineabove=Line("", "=", " ", ""),
|
| 207 |
+
linebelowheader=Line("", "=", " ", ""),
|
| 208 |
+
linebetweenrows=None,
|
| 209 |
+
linebelow=Line("", "=", " ", ""),
|
| 210 |
+
headerrow=DataRow("", " ", ""),
|
| 211 |
+
datarow=DataRow("", " ", ""),
|
| 212 |
+
padding=0,
|
| 213 |
+
with_header_hide=None,
|
| 214 |
+
),
|
| 215 |
+
"mediawiki": TableFormat(
|
| 216 |
+
lineabove=Line(
|
| 217 |
+
'{| class="wikitable" style="text-align: left;"',
|
| 218 |
+
"",
|
| 219 |
+
"",
|
| 220 |
+
"\n|+ <!-- caption -->\n|-",
|
| 221 |
+
),
|
| 222 |
+
linebelowheader=Line("|-", "", "", ""),
|
| 223 |
+
linebetweenrows=Line("|-", "", "", ""),
|
| 224 |
+
linebelow=Line("|}", "", "", ""),
|
| 225 |
+
headerrow=partial(_mediawiki_row_with_attrs, "!"),
|
| 226 |
+
datarow=partial(_mediawiki_row_with_attrs, "|"),
|
| 227 |
+
padding=0,
|
| 228 |
+
with_header_hide=None,
|
| 229 |
+
),
|
| 230 |
+
"latex": TableFormat(
|
| 231 |
+
lineabove=_latex_line_begin_tabular,
|
| 232 |
+
linebelowheader=Line("\\hline", "", "", ""),
|
| 233 |
+
linebetweenrows=None,
|
| 234 |
+
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
|
| 235 |
+
headerrow=DataRow("", "&", "\\\\"),
|
| 236 |
+
datarow=DataRow("", "&", "\\\\"),
|
| 237 |
+
padding=1,
|
| 238 |
+
with_header_hide=None,
|
| 239 |
+
),
|
| 240 |
+
"tsv": TableFormat(
|
| 241 |
+
lineabove=None,
|
| 242 |
+
linebelowheader=None,
|
| 243 |
+
linebetweenrows=None,
|
| 244 |
+
linebelow=None,
|
| 245 |
+
headerrow=DataRow("", "\t", ""),
|
| 246 |
+
datarow=DataRow("", "\t", ""),
|
| 247 |
+
padding=0,
|
| 248 |
+
with_header_hide=None,
|
| 249 |
+
),
|
| 250 |
+
}
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
tabulate_formats = list(sorted(_table_formats.keys()))
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
_invisible_codes = re.compile("\x1b\[\d*m") # ANSI color codes
|
| 257 |
+
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m") # ANSI color codes
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def simple_separated_format(separator):
    r"""Construct a simple TableFormat with columns separated by a separator.

    >>> tsv = simple_separated_format("\t")
    >>> tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \t 1\nspam\t23'
    True
    """
    row_format = DataRow("", separator, "")
    return TableFormat(
        lineabove=None,
        linebelowheader=None,
        linebetweenrows=None,
        linebelow=None,
        headerrow=row_format,
        datarow=row_format,
        padding=0,
        with_header_hide=None,
    )
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
def _isconvertible(conv, string):
|
| 281 |
+
try:
|
| 282 |
+
n = conv(string)
|
| 283 |
+
return True
|
| 284 |
+
except ValueError:
|
| 285 |
+
return False
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
def _isnumber(string):
    """True when the value can be parsed as a float.

    >>> _isnumber("123.45")
    True
    >>> _isnumber("123")
    True
    >>> _isnumber("spam")
    False
    """
    return _isconvertible(float, string)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def _isint(string):
    """True when the value is an int, or a string parseable as one.

    >>> _isint("123")
    True
    >>> _isint("123.45")
    False
    """
    if type(string) is int:
        return True
    stringy = isinstance(string, (_binary_type, _text_type))
    return stringy and _isconvertible(int, string)
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def _type(string, has_invisible=True):
    """The least generic type (type(None), int, float, str, unicode).

    >>> _type(None) is type(None)
    True
    >>> _type("foo") is type("")
    True
    >>> _type("1") is type(1)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    """
    stringlike = isinstance(string, (_text_type, _binary_type))
    if has_invisible and stringlike:
        # measure/classify the visible text, not the ANSI escapes
        string = _strip_invisible(string)

    if string is None:
        return _none_type
    if hasattr(string, "isoformat"):
        # datetime.datetime, datetime.date and datetime.time render as text
        return _text_type
    if _isint(string):
        return int
    if _isnumber(string):
        return float
    if isinstance(string, _binary_type):
        return _binary_type
    return _text_type
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def _afterpoint(string):
    """Number of symbols after the decimal point; -1 when there is none.

    >>> _afterpoint("123.45")
    2
    >>> _afterpoint("1001")
    -1
    >>> _afterpoint("eggs")
    -1
    >>> _afterpoint("123e45")
    2
    """
    if not _isnumber(string) or _isint(string):
        # integers and non-numbers have nothing after a decimal point
        return -1
    marker = string.rfind(".")
    if marker < 0:
        # no dot: fall back to scientific notation, count after 'e'/'E'
        marker = string.lower().rfind("e")
    if marker < 0:
        return -1
    return len(string) - marker - 1
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def _padleft(width, s, has_invisible=True):
|
| 377 |
+
"""Flush right.
|
| 378 |
+
|
| 379 |
+
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
|
| 380 |
+
True
|
| 381 |
+
|
| 382 |
+
"""
|
| 383 |
+
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
|
| 384 |
+
fmt = "{0:>%ds}" % iwidth
|
| 385 |
+
return fmt.format(s)
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
def _padright(width, s, has_invisible=True):
|
| 389 |
+
"""Flush left.
|
| 390 |
+
|
| 391 |
+
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
|
| 392 |
+
True
|
| 393 |
+
|
| 394 |
+
"""
|
| 395 |
+
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
|
| 396 |
+
fmt = "{0:<%ds}" % iwidth
|
| 397 |
+
return fmt.format(s)
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def _padboth(width, s, has_invisible=True):
|
| 401 |
+
"""Center string.
|
| 402 |
+
|
| 403 |
+
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
|
| 404 |
+
True
|
| 405 |
+
|
| 406 |
+
"""
|
| 407 |
+
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
|
| 408 |
+
fmt = "{0:^%ds}" % iwidth
|
| 409 |
+
return fmt.format(s)
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
def _strip_invisible(s):
    """Remove invisible ANSI color codes from a str or bytes value."""
    if isinstance(s, _text_type):
        pattern, replacement = _invisible_codes, ""
    else:  # a bytestring
        pattern, replacement = _invisible_codes_bytes, b""
    return pattern.sub(replacement, s)
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
def _visible_width(s):
    """Visible width of a printed string; ANSI color codes don't count.

    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)
    """
    if isinstance(s, (_text_type, _binary_type)):
        return len(_strip_invisible(s))
    # non-string cells are measured after text conversion
    return len(_text_type(s))
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
    """Pad all strings of a column to a common width ([string] -> [padded_string]).

    Returns the strings untouched when *alignment* is falsy.
    """
    if alignment == "right":
        padfn = _padleft
        strings = [cell.strip() for cell in strings]
    elif alignment == "center":
        padfn = _padboth
        strings = [cell.strip() for cell in strings]
    elif alignment == "decimal":
        # right-pad each value so that all decimal points line up
        decimals = [_afterpoint(cell) for cell in strings]
        maxdecimals = max(decimals)
        strings = [
            cell + " " * (maxdecimals - decs)
            for cell, decs in zip(strings, decimals)
        ]
        padfn = _padleft
    elif not alignment:
        return strings
    else:
        padfn = _padright
        strings = [cell.strip() for cell in strings]

    width_fn = _visible_width if has_invisible else len
    colwidth = max(max(map(width_fn, strings)), minwidth)
    return [padfn(colwidth, cell, has_invisible) for cell in strings]
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
def _more_generic(type1, type2):
    """Return the more generic of two cell types, using the hierarchy
    NoneType < int < float < bytes < text."""
    rank = {_none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4}
    by_rank = {0: _none_type, 1: int, 2: float, 3: _binary_type, 4: _text_type}
    # unknown types are treated as text, the most generic
    return by_rank[max(rank.get(type1, 4), rank.get(type2, 4))]
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
def _column_type(strings, has_invisible=True):
    """The least generic type all column values are convertible to.

    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    """
    cell_types = (_type(cell, has_invisible) for cell in strings)
    # start from int, the least generic non-None type
    return reduce(_more_generic, cell_types, int)
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
def _format(val, valtype, floatfmt, missingval=""):
    r"""Format a single value according to its detected column type.

    ``None`` renders as *missingval*; floats honour *floatfmt*;
    bytestrings are decoded as ASCII when possible.

    >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430']
    >>> tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]]
    >>> good_result = '\u0431\u0443\u043a\u0432\u0430 \u0446\u0438\u0444\u0440\u0430\n------- -------\n\u0430\u0437 2\n\u0431\u0443\u043a\u0438 4'
    >>> tabulate(tbl, headers=hrow) == good_result
    True
    """
    if val is None:
        return missingval

    if valtype is float:
        return format(float(val), floatfmt)
    if valtype is _binary_type:
        try:
            return _text_type(val, "ascii")
        except TypeError:
            # value was not a bytes-like object after all
            return _text_type(val)
    # int, text and every other type share the default rendering
    return "{0}".format(val)
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
def _align_header(header, alignment, width):
    """Pad a header cell to *width* according to *alignment*
    (right-flushed by default)."""
    if not alignment:
        return "{0}".format(header)
    if alignment == "left":
        return _padright(width, header)
    if alignment == "center":
        return _padboth(width, header)
    return _padleft(width, header)
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
def _normalize_tabular_data(tabular_data, headers):
    """Transform a supported data type to a list of lists, and a list of headers.

    Supported tabular data types:

    * list-of-lists or another iterable of iterables
    * list of named tuples (usually used with headers="keys")
    * list of dicts / OrderedDicts (usually used with headers="keys")
    * 2D NumPy arrays
    * NumPy record arrays (usually used with headers="keys")
    * dict of iterables (usually used with headers="keys")
    * pandas.DataFrame (usually used with headers="keys")

    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict; columns have to be transposed
            keys = tabular_data.keys()
            rows = list(izip_longest(*tabular_data.values()))
        elif hasattr(tabular_data, "index"):
            # values is a property and .index exists =>
            # likely a pandas.DataFrame (pandas 0.11.0)
            keys = tabular_data.keys()
            vals = tabular_data.values  # values matrix needs no transposing
            names = tabular_data.index
            rows = [[v] + list(row) for v, row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")

        if headers == "keys":
            headers = list(map(_text_type, keys))  # headers should be strings
    else:
        # a usual iterable of iterables, or a NumPy array
        rows = list(tabular_data)

        if (
            headers == "keys"
            and hasattr(tabular_data, "dtype")
            and getattr(tabular_data.dtype, "names")
        ):
            # numpy record array
            headers = tabular_data.dtype.names
        elif (
            headers == "keys"
            and len(rows) > 0
            and isinstance(rows[0], tuple)
            and hasattr(rows[0], "_fields")
        ):
            # namedtuple
            headers = list(map(_text_type, rows[0]._fields))
        elif len(rows) > 0 and isinstance(rows[0], dict):
            # dict or OrderedDict
            uniq_keys = set()  # hashed lookup
            keys = []  # preserves input order
            if headers == "firstrow":
                firstdict = rows[0] if len(rows) > 0 else {}
                keys.extend(firstdict.keys())
                uniq_keys.update(keys)
                rows = rows[1:]
            for row in rows:
                for k in row.keys():
                    # save unique items in input order
                    if k not in uniq_keys:
                        keys.append(k)
                        uniq_keys.add(k)
            if headers == "keys":
                headers = keys
            elif headers == "firstrow" and len(rows) > 0:
                headers = [firstdict.get(k, k) for k in keys]
                headers = list(map(_text_type, headers))
            rows = [[row.get(k) for k in keys] for row in rows]
        elif headers == "keys" and len(rows) > 0:
            # keys are column indices
            headers = list(map(_text_type, range(len(rows[0]))))

    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0]))  # headers should be strings
        rows = rows[1:]

    headers = list(map(_text_type, headers))
    rows = list(map(list, rows))

    # pad with empty headers for initial columns if necessary
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            headers = [""] * (ncols - nhs) + headers

    return rows, headers
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
def tabulate(
    tabular_data,
    headers=(),
    tablefmt="simple",
    floatfmt="g",
    numalign="decimal",
    stralign="left",
    missingval="",
):
    """Format a fixed width table for pretty printing.

    Fix: the default for `headers` is now an immutable empty tuple
    instead of a shared mutable list (``[]``), avoiding the
    mutable-default-argument pitfall; behaviour is unchanged for all
    callers.

    `tabular_data` may be a list-of-lists (or another iterable of
    iterables), a list of named tuples, a dictionary of iterables, an
    iterable of dictionaries, a 2-D NumPy array, a NumPy record array,
    or a pandas.DataFrame.

    `headers` may be an explicit list of column headers; "firstrow" to
    use the first data row; or "keys" to use dictionary keys or column
    indices.  When fewer headers than columns are given, they name the
    last columns (consistent with R and pandas dataframes).

    `numalign`/`stralign` control column alignment: "right", "center",
    "left", "decimal" (numalign only), or None to disable alignment.
    `floatfmt` is a format specification for float columns, and `None`
    cell values are replaced with the `missingval` string.

    `tablefmt` selects the output style: 'plain', 'simple', 'grid',
    'pipe', 'orgtbl', 'rst', 'mediawiki', 'latex', or any TableFormat
    instance; `tabulate_formats` lists the supported format names.
    """
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)

    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = "\n".join(
        ["\t".join(map(_text_type, headers))]
        + ["\t".join(map(_text_type, row)) for row in list_of_lists]
    )
    has_invisible = re.search(_invisible_codes, plain_text)
    width_fn = _visible_width if has_invisible else len

    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [
        [_format(v, ct, floatfmt, missingval) for v in c]
        for c, ct in zip(cols, coltypes)
    ]

    # align columns
    aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
    minwidths = [width_fn(h) + 2 for h in headers] if headers else [0] * len(cols)
    cols = [
        _align_column(c, a, minw, has_invisible)
        for c, a, minw in zip(cols, aligns, minwidths)
    ]

    if headers:
        # align headers and add headers
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, cols)]
        headers = [
            _align_header(h, a, minw) for h, a, minw in zip(headers, aligns, minwidths)
        ]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))

    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])

    return _format_table(tablefmt, headers, rows, minwidths, aligns)
|
| 898 |
+
|
| 899 |
+
|
| 900 |
+
def _build_simple_row(padded_cells, rowfmt):
|
| 901 |
+
"Format row according to DataRow format without padding."
|
| 902 |
+
begin, sep, end = rowfmt
|
| 903 |
+
return (begin + sep.join(padded_cells) + end).rstrip()
|
| 904 |
+
|
| 905 |
+
|
| 906 |
+
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
|
| 907 |
+
"Return a string which represents a row of data cells."
|
| 908 |
+
if not rowfmt:
|
| 909 |
+
return None
|
| 910 |
+
if hasattr(rowfmt, "__call__"):
|
| 911 |
+
return rowfmt(padded_cells, colwidths, colaligns)
|
| 912 |
+
else:
|
| 913 |
+
return _build_simple_row(padded_cells, rowfmt)
|
| 914 |
+
|
| 915 |
+
|
| 916 |
+
def _build_line(colwidths, colaligns, linefmt):
|
| 917 |
+
"Return a string which represents a horizontal line."
|
| 918 |
+
if not linefmt:
|
| 919 |
+
return None
|
| 920 |
+
if hasattr(linefmt, "__call__"):
|
| 921 |
+
return linefmt(colwidths, colaligns)
|
| 922 |
+
else:
|
| 923 |
+
begin, fill, sep, end = linefmt
|
| 924 |
+
cells = [fill * w for w in colwidths]
|
| 925 |
+
return _build_simple_row(cells, (begin, sep, end))
|
| 926 |
+
|
| 927 |
+
|
| 928 |
+
def _pad_row(cells, padding):
|
| 929 |
+
if cells:
|
| 930 |
+
pad = " " * padding
|
| 931 |
+
padded_cells = [pad + cell + pad for cell in cells]
|
| 932 |
+
return padded_cells
|
| 933 |
+
else:
|
| 934 |
+
return cells
|
| 935 |
+
|
| 936 |
+
|
| 937 |
+
def _format_table(fmt, headers, rows, colwidths, colaligns):
    """Produce a plain-text representation of the table."""
    # `fmt` is a TableFormat whose line/row slots may be None (section
    # omitted) or callables; _build_line/_build_row handle both cases and
    # return None for omitted sections — NOTE(review): such None entries
    # would break the final join, so formats presumably never combine a
    # truthy slot with a None-producing builder; confirm against formats.
    lines = []
    # Header-dependent sections listed in with_header_hide are suppressed.
    hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
    pad = fmt.padding
    headerrow = fmt.headerrow

    # Column widths grow by the padding applied on each side of every cell.
    padded_widths = [(w + 2 * pad) for w in colwidths]
    padded_headers = _pad_row(headers, pad)
    padded_rows = [_pad_row(row, pad) for row in rows]

    if fmt.lineabove and "lineabove" not in hidden:
        lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))

    if padded_headers:
        lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
        if fmt.linebelowheader and "linebelowheader" not in hidden:
            lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))

    if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
        # initial rows with a line below
        for row in padded_rows[:-1]:
            lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
            lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
        # the last row without a line below
        lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
    else:
        for row in padded_rows:
            lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))

    if fmt.linebelow and "linebelow" not in hidden:
        lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))

    return "\n".join(lines)
|
testbed/pgmpy__pgmpy/pgmpy/factors/FactorDict.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
from numbers import Number
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from sklearn.preprocessing import OrdinalEncoder
|
| 8 |
+
|
| 9 |
+
from pgmpy.factors.base import factor_product
|
| 10 |
+
from pgmpy.factors.discrete import DiscreteFactor
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class FactorDict(dict):
    """A dict mapping marginal names (tuples of variable names) to
    `DiscreteFactor` objects, with elementwise arithmetic defined on it.

    Addition, subtraction, scalar multiplication, and the inner product
    (`dot`) all operate marginal-by-marginal over the shared keys.
    """

    @classmethod
    def from_dataframe(cls, df, marginals):
        """Create a `FactorDict` from a given set of marginals.

        Parameters
        ----------
        df: pandas DataFrame object

        marginals: List[Tuple[str]]
            List of Tuples containing the names of the marginals.

        Returns
        -------
        Factor dictionary: FactorDict
            FactorDict with each marginal's Factor representing the empirical
            frequency of the marginal from the dataset.

        Raises
        ------
        ValueError
            If `df` contains any missing (None / NaN) values.
        """
        # np.histogramdd cannot bin missing values, so fail fast instead of
        # silently producing corrupted counts.
        if df.isnull().values.any():
            raise ValueError("df cannot contain None or np.nan values.")

        factor_dict = cls({})
        for marginal in marginals:
            # Subset of columns arranged in a lexographical ordering.
            _df = df.loc[:, list(marginal)].sort_values(list(marginal))
            cardinality = list(_df.nunique())
            # Since we have sorted the columns, this encoding will
            # also be sorted lexographically.
            encoded = OrdinalEncoder().fit_transform(_df)
            factor_dict[marginal] = DiscreteFactor(
                variables=marginal,
                cardinality=cardinality,
                # Histogram over the encoded states gives empirical counts.
                values=np.histogramdd(sample=encoded, bins=cardinality)[0].flatten(),
                state_names={
                    column: sorted(_df[column].unique().tolist()) for column in marginal
                },
            )
        return factor_dict

    def get_factors(self):
        """Return the set of factors stored in this dict (duplicates collapse)."""
        return set(self.values())

    def __mul__(self, const):
        """Multiply every factor by the scalar `const`, returning a new FactorDict."""
        return FactorDict({clique: const * self[clique] for clique in self})

    def __rmul__(self, const):
        """Right scalar multiplication — same as left multiplication."""
        return self.__mul__(const)

    def __add__(self, other):
        """Add `other` clique-wise.

        `other` may be a scalar (added to every factor) or another FactorDict;
        NOTE(review): the latter is assumed to share this dict's keys — confirm.
        """
        return FactorDict(
            {clique: self[clique] + other for clique in self}
            if isinstance(other, Number)
            else {clique: self[clique] + other[clique] for clique in self}
        )

    def __sub__(self, other):
        """Subtract `other` clique-wise, defined via `__add__` and negation."""
        return self + -1 * other

    def dot(self, other):
        """Inner product: sum over cliques of the elementwise product's total."""
        return sum((self[clique] * other[clique]).values.sum() for clique in self)

    def product(self):
        """Return the factor product of all unique factors in the dict."""
        return factor_product(*self.get_factors())
|
testbed/pgmpy__pgmpy/pgmpy/factors/FactorSet.py
ADDED
|
@@ -0,0 +1,385 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
|
| 3 |
+
from functools import reduce
|
| 4 |
+
|
| 5 |
+
from pgmpy.factors.base import BaseFactor
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class FactorSet(object):
    r"""
    Base class of *DiscreteFactor Sets*.

    A factor set is a compact representation of the higher dimensional factor
    :math:`\phi_1\cdot\phi_2\cdots\phi_n`: instead of materializing the
    product, it stores the union of the participating factors, e.g.
    :math:`\vec\phi = \phi_1 \cup \phi_2` represents :math:`\phi_1\cdot\phi_2`.
    """

    def __init__(self, *factors_list):
        """
        Initialize the factor set from the given factors.

        Parameters
        ----------
        factors_list: Factor1, Factor2, ....
            All the factors whose product is represented by the factor set.

        Raises
        ------
        TypeError
            If any argument is not an instance of a `BaseFactor` subclass.
        """
        for phi in factors_list:
            if not isinstance(phi, BaseFactor):
                raise TypeError("Input parameters must be child classes of BaseFactor")
        # Store copies so later mutation of the caller's factors cannot leak in.
        self.factors = {phi.copy() for phi in factors_list}

    def add_factors(self, *factors):
        """
        Add the given factors to the factor set.

        Parameters
        ----------
        factors: Factor1, Factor2, ...., Factorn
            Factors to be added into the factor set.
        """
        self.factors.update(factors)

    def remove_factors(self, *factors):
        """
        Remove the given factors from the factor set.

        Parameters
        ----------
        factors: Factor1, Factor2, ...., Factorn
            Factors to be removed from the factor set.

        Raises
        ------
        KeyError
            If any factor is not present in the set.
        """
        for phi in factors:
            self.factors.remove(phi)

    def get_factors(self):
        """
        Return the set of factors present in the factor set.
        """
        return self.factors

    def product(self, factorset, inplace=True):
        r"""
        Multiply this factor set by another factor set.

        The product of two factor sets :math:`\vec\phi_1` and
        :math:`\vec\phi_2` is their union
        :math:`\vec\phi_3 = \vec\phi_1 \cup \vec\phi_2`.

        Parameters
        ----------
        factorset: FactorSet
            The factor set to multiply with.

        inplace: A boolean (Default value True)
            If inplace = True, modifies this FactorSet object; if False,
            returns a new FactorSet object.

        Returns
        -------
        FactorSet or None
            The product factor set if inplace = False, otherwise None.
        """
        result = self if inplace else self.copy()
        # Copy the operand so its factors are never shared with the result.
        result.add_factors(*factorset.copy().factors)
        if not inplace:
            return result

    def divide(self, factorset, inplace=True):
        r"""
        Divide this factor set by another factor set.

        Division :math:`\frac{\vec\phi_1}{\vec\phi_2}` is the union of the
        factors of :math:`\vec\phi_1` with the reciprocals
        :math:`\frac{1}{\phi_i}` of every factor :math:`\phi_i` in
        :math:`\vec\phi_2`.

        Parameters
        ----------
        factorset: FactorSet
            The divisor.

        inplace: A boolean (Default value True)
            If inplace = True, modifies this FactorSet object; if False,
            returns a new FactorSet object.

        Returns
        -------
        FactorSet or None
            The quotient factor set if inplace = False, otherwise None.
        """
        result = self if inplace else self.copy()
        divisor = factorset.copy()
        # phi.identity_factor() / phi is the reciprocal factor 1/phi.
        result.add_factors(*[phi.identity_factor() / phi for phi in divisor.factors])
        if not inplace:
            return result

    def marginalize(self, variables, inplace=True):
        """
        Marginalize the factors in the set with respect to the given variables.

        Parameters
        ----------
        variables: list, array-like
            List of the variables to be marginalized.

        inplace: boolean (Default value True)
            If inplace = True, modifies the factor set itself; otherwise a
            new, marginalized factor set is returned.

        Returns
        -------
        FactorSet or None
            The marginalized factor set if inplace = False, otherwise None.

        Raises
        ------
        TypeError
            If `variables` is a single string rather than a collection.
        """
        if isinstance(variables, str):
            raise TypeError("Expected list or array-like type got type str")

        result = self if inplace else self.copy()

        # Only factors whose scope mentions any of `variables` are affected.
        touched = {
            phi for phi in result.factors if set(phi.scope()).intersection(variables)
        }

        for phi in touched:
            to_drop = list(set(phi.scope()).intersection(variables))
            if inplace:
                phi.marginalize(to_drop, inplace=True)
            else:
                # Swap the copied factor for its marginalized version.
                result.remove_factors(phi)
                result.add_factors(phi.marginalize(to_drop, inplace=False))

        if not inplace:
            return result

    def __mul__(self, other):
        # NOTE: delegates with inplace=True, so `a * b` mutates `a` and
        # evaluates to None — kept for backward compatibility.
        return self.product(other)

    def __truediv__(self, other):
        # NOTE: same inplace semantics as __mul__ — mutates `self`.
        return self.divide(other)

    def __str__(self):
        return self.factors.__str__()

    def copy(self):
        """
        Return a copy of the factor set.
        """
        # __init__ copies every factor, so passing them directly is safe.
        return FactorSet(*self.factors)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def factorset_product(*factorsets_list):
    r"""
    Base method used for product of factor sets.

    The product of factor sets :math:`\vec\phi_1` and :math:`\vec\phi_2` is
    the factor set :math:`\vec\phi_3 = \vec\phi_1 \cup \vec\phi_2`.

    Parameters
    ----------
    factorsets_list: FactorSet1, FactorSet2, ..., FactorSetn
        All the factor sets to be multiplied.

    Returns
    -------
    FactorSet
        Product of the factor sets in factorsets_list.

    Raises
    ------
    TypeError
        If any argument is not a FactorSet instance.
    """
    for factorset in factorsets_list:
        if not isinstance(factorset, FactorSet):
            raise TypeError("Input parameters must be FactorSet instances")
    # Fold left-to-right, always producing a fresh FactorSet.
    return reduce(lambda acc, nxt: acc.product(nxt, inplace=False), factorsets_list)
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def factorset_divide(factorset1, factorset2):
    r"""
    Base method for dividing two factor sets.

    Division :math:`\frac{\vec\phi_1}{\vec\phi_2}` is the union of the
    factors of :math:`\vec\phi_1` with the reciprocals
    :math:`\frac{1}{\phi_i}` of every factor in :math:`\vec\phi_2`.

    Parameters
    ----------
    factorset1: FactorSet
        The dividend.

    factorset2: FactorSet
        The divisor.

    Returns
    -------
    FactorSet
        The division of factorset1 and factorset2.

    Raises
    ------
    TypeError
        If either argument is not a FactorSet instance.
    """
    valid = isinstance(factorset1, FactorSet) and isinstance(factorset2, FactorSet)
    if not valid:
        raise TypeError("factorset1 and factorset2 must be FactorSet instances")
    return factorset1.divide(factorset2, inplace=False)
|
testbed/pgmpy__pgmpy/pgmpy/factors/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .base import factor_divide, factor_product, factor_sum_product
|
| 2 |
+
from .FactorSet import FactorSet, factorset_divide, factorset_product
|
| 3 |
+
from .FactorDict import FactorDict
|
| 4 |
+
|
| 5 |
+
__all__ = [
|
| 6 |
+
"FactorSet",
|
| 7 |
+
"factorset_divide",
|
| 8 |
+
"factorset_product",
|
| 9 |
+
"factor_product",
|
| 10 |
+
"factor_divide",
|
| 11 |
+
"factor_sum_product",
|
| 12 |
+
"FactorDict",
|
| 13 |
+
]
|
testbed/pgmpy__pgmpy/pgmpy/factors/base.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import abstractmethod
|
| 2 |
+
from functools import reduce
|
| 3 |
+
from itertools import chain
|
| 4 |
+
|
| 5 |
+
from opt_einsum import contract
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class BaseFactor(object):
    """
    Base class for Factors. Any Factor implementation should inherit this class.
    """

    def __init__(self, *args, **kwargs):
        # Intentionally a no-op; concrete factor classes define their own state.
        pass

    @abstractmethod
    def is_valid_cpd(self):
        # Concrete subclasses must report whether this factor is a valid CPD.
        pass
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def factor_product(*args):
    """
    Returns factor product over `args`.

    Parameters
    ----------
    args: `BaseFactor` instances.
        factors to be multiplied

    Returns
    -------
    BaseFactor: `BaseFactor` representing factor product over all the
        `BaseFactor` instances in args.

    Raises
    ------
    ValueError
        If called with no factors.
    TypeError
        If any argument is not a `BaseFactor` instance.
    NotImplementedError
        If the arguments mix different factor classes.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> from pgmpy.factors import factor_product
    >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
    >>> phi = factor_product(phi1, phi2)
    >>> phi.variables
    ['x1', 'x2', 'x3', 'x4']
    >>> phi.cardinality
    array([2, 3, 2, 2])
    """
    # An empty product is undefined; previously zero args fell through to the
    # misleading "same factor class" NotImplementedError below.
    if not args:
        raise ValueError("factor_product requires at least one factor.")
    if not all(isinstance(phi, BaseFactor) for phi in args):
        raise TypeError("Arguments must be factors")
    # Check if all of the arguments are of the same type
    elif len(set(map(type, args))) != 1:
        raise NotImplementedError(
            "All the args are expected to be instances of the same factor class."
        )

    if len(args) == 1:
        # Copy so the result never aliases the caller's factor.
        return args[0].copy()
    return reduce(lambda phi1, phi2: phi1 * phi2, args)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def factor_sum_product(output_vars, factors):
    r"""
    For a given set of factors: `args` returns the result of $ \sum_{var \not \in output_vars} \prod \textit{args} $.

    Parameters
    ----------
    output_vars: list, iterable
        List of variable names on which the output factor is to be defined. Variable which are present in any of the factors
        but not in output_vars will be marginalized out.

    factors: list, iterable
        List of DiscreteFactor objects on which to perform the sum product operation.

    Returns
    -------
    pgmpy.factor.discrete.DiscreteFactor: A DiscreteFactor object on `output_vars`.

    Examples
    --------
    >>> from pgmpy.factors import factor_sum_product
    >>> from pgmpy.utils import get_example_model
    >>> model = get_example_model('alarm')
    >>> factors = [cpd.to_factor() for cpd in model.cpds]
    >>> factor_sum_product(output_vars=['HISTORY'], factors=factors)
    <DiscreteFactor representing phi(HISTORY:2) at 0x7f240556b970>
    """
    # Later factors win on duplicate variables. NOTE(review): assumes all
    # factors agree on the state names of shared variables — confirm.
    state_names = {}
    for phi in factors:
        state_names.update(phi.state_names)

    # Build the einsum operand list as alternating (values, subscripts)
    # pairs, ending with the requested output subscripts; contract() sums
    # out every variable not listed in output_vars.
    einsum_expr = []
    for phi in factors:
        einsum_expr.append(phi.values)
        einsum_expr.append(phi.variables)
    values = contract(*einsum_expr, output_vars, optimize="greedy")

    # Imported locally to avoid a circular import with pgmpy.factors.discrete.
    from pgmpy.factors.discrete import DiscreteFactor

    return DiscreteFactor(
        variables=output_vars,
        cardinality=values.shape,
        values=values,
        state_names={var: state_names[var] for var in output_vars},
    )
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def factor_divide(phi1, phi2):
    """
    Returns `DiscreteFactor` representing `phi1 / phi2`.

    Parameters
    ----------
    phi1: Factor
        The Dividend.

    phi2: Factor
        The Divisor.

    Returns
    -------
    DiscreteFactor: `DiscreteFactor` representing factor division `phi1 / phi2`.

    Raises
    ------
    TypeError
        If either argument is not a factor instance.
    NotImplementedError
        If the two arguments are factors of different classes.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> from pgmpy.factors import factor_divide
    >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi2 = DiscreteFactor(['x3', 'x1'], [2, 2], range(1, 5))
    >>> phi = factor_divide(phi1, phi2)
    >>> phi.variables
    ['x1', 'x2', 'x3']
    >>> phi.cardinality
    array([2, 3, 2])
    >>> phi.values
    array([[[ 0.        ,  0.33333333],
            [ 2.        ,  1.        ],
            [ 4.        ,  1.66666667]],

           [[ 3.        ,  1.75      ],
            [ 4.        ,  2.25      ],
            [ 5.        ,  2.75      ]]])
    """
    if not isinstance(phi1, BaseFactor) or not isinstance(phi2, BaseFactor):
        raise TypeError("phi1 and phi2 should be factors instances")

    # Both arguments must be of the same concrete factor class; identity
    # comparison is the idiomatic way to compare types (flake8 E721).
    elif type(phi1) is not type(phi2):
        raise NotImplementedError(
            "All the args are expected to be instances of the same factor class."
        )

    return phi1.divide(phi2, inplace=False)
|
testbed/pgmpy__pgmpy/pgmpy/factors/continuous/ContinuousFactor.py
ADDED
|
@@ -0,0 +1,461 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import types
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import scipy.integrate as integrate
|
| 5 |
+
|
| 6 |
+
from pgmpy.factors.base import BaseFactor
|
| 7 |
+
from pgmpy.factors.distributions import CustomDistribution, GaussianDistribution
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class ContinuousFactor(BaseFactor):
    """
    Base class for factors representing multivariate continuous
    distributions.  The density itself is delegated to an underlying
    distribution object (GaussianDistribution or CustomDistribution).
    """

    def __init__(self, variables, pdf, *args, **kwargs):
        """
        Parameters
        ----------
        variables: list or array-like
            The variables for which the distribution is defined.

        pdf: function, str or CustomDistribution
            The probability density function of the distribution.  If the
            string 'gaussian' is given, `mean` and `covariance` keyword
            arguments must also be provided.

        Examples
        --------
        >>> import numpy as np
        >>> from scipy.special import beta
        >>> from pgmpy.factors.continuous import ContinuousFactor
        # Two variable dirichlet distribution with alpha = (1,2)
        >>> def dirichlet_pdf(x, y):
        ...     return (np.power(x, 1) * np.power(y, 2)) / beta(x, y)
        >>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf)
        >>> dirichlet_factor.scope()
        ['x', 'y']
        >>> dirichlet_factor.assignment(5,6)
        226800.0
        >>> dirichlet_factor.cdf([(0, 0.2), (0, 0.5)])
        8.384972114200703e-05
        """
        if not isinstance(variables, (list, tuple, np.ndarray)):
            raise TypeError(
                f"variables: Expected type list or array-like, got type {type(variables)}"
            )

        if len(set(variables)) != len(variables):
            raise ValueError("Variable names cannot be same.")

        variables = list(variables)

        if isinstance(pdf, str):
            if pdf == "gaussian":
                self.distribution = GaussianDistribution(
                    variables=variables,
                    mean=kwargs["mean"],
                    covariance=kwargs["covariance"],
                )
            else:
                raise NotImplementedError(
                    f"{pdf} distribution not supported. Please use CustomDistribution"
                )

        elif isinstance(pdf, CustomDistribution):
            self.distribution = pdf

        elif callable(pdf):
            self.distribution = CustomDistribution(
                variables=variables, distribution=pdf
            )

        else:
            # BUGFIX: report the type of `pdf`; the message previously used
            # type(variables), which is always a valid type at this point.
            raise ValueError(f"pdf: Expected type: str or function, Got: {type(pdf)}")

    @property
    def pdf(self):
        """
        Returns the pdf of the ContinuousFactor.
        """
        return self.distribution.pdf

    @property
    def variable(self):
        # The variable on which this factor/CPD is defined (first in scope).
        return self.scope()[0]

    def cdf(self, limits):
        """
        Returns the value of the cumulative distribution function for a
        multivariate distribution over the given limits.

        Parameters
        ----------
        limits : list of tuples
            Each tuple contains the lower and upper integration limits for
            each variable, in scope order.  For example, limits for a
            bivariate distribution could be [(-np.inf, x1), (-np.inf, x2)].

        Returns
        -------
        float
            The cumulative probability within the specified limits.

        Examples
        --------
        >>> from scipy.stats import multivariate_normal
        >>> from pgmpy.factors.continuous import ContinuousFactor
        >>> pdf = lambda x, y: multivariate_normal.pdf([x, y], mean=[0,0], cov=[[1, 0.5], [0.5, 1]])
        >>> factor = ContinuousFactor(['x', 'y'], pdf)
        >>> factor.cdf([(-np.inf, 1), (-np.inf, 1)])
        0.7452035867990542
        """
        if len(limits) != len(self.distribution.variables):
            raise ValueError(
                "Limits should have the same length as the number of variables."
            )

        # nquad returns (value, abserr); only the integral value is needed.
        return integrate.nquad(self.pdf, limits)[0]

    def scope(self):
        """
        Returns the scope of the factor.

        Returns
        -------
        list: List of variable names in the scope of the factor.

        Examples
        --------
        >>> from pgmpy.factors.continuous import ContinuousFactor
        >>> from scipy.stats import multivariate_normal
        >>> normal_pdf = lambda x: multivariate_normal(x, [0, 0], [[1, 0], [0, 1]])
        >>> phi = ContinuousFactor(['x1', 'x2'], normal_pdf)
        >>> phi.scope()
        ['x1', 'x2']
        """
        return self.distribution.variables

    def get_evidence(self):
        # All variables except the one the CPD is defined on.
        return self.scope()[1:]

    def assignment(self, *args):
        """
        Returns the pdf value for the given point.

        Parameters
        ----------
        *args: values
            Values whose assignment is to be computed, in scope order.

        Examples
        --------
        >>> from pgmpy.factors.continuous import ContinuousFactor
        >>> from scipy.stats import multivariate_normal
        >>> normal_pdf = lambda x1, x2: multivariate_normal.pdf((x1, x2), [0, 0], [[1, 0], [0, 1]])
        >>> phi = ContinuousFactor(['x1', 'x2'], normal_pdf)
        >>> phi.assignment(1, 2)
        0.013064233284684921
        """
        return self.distribution.assignment(*args)

    def copy(self):
        """
        Return a copy of the distribution.

        Returns
        -------
        ContinuousFactor object: copy of the distribution

        Examples
        --------
        >>> import numpy as np
        >>> from scipy.special import beta
        >>> from pgmpy.factors.continuous import ContinuousFactor
        >>> def dirichlet_pdf(x, y):
        ...     return (np.power(x, 1) * np.power(y, 2)) / beta(x, y)
        >>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf)
        >>> copy_factor = dirichlet_factor.copy()
        >>> copy_factor.scope()
        ['x', 'y']
        """
        # The distribution is deep-copied so mutating the copy does not
        # affect this factor.
        return ContinuousFactor(self.scope(), self.distribution.copy())

    def discretize(self, method, *args, **kwargs):
        """
        Discretizes the continuous distribution into discrete probability
        masses using the given discretizer.

        Parameters
        ----------
        method : A Discretizer Class from pgmpy.discretize

        *args, **kwargs:
            The parameters to be given to the Discretizer Class.

        Returns
        -------
        An n-D array or a DiscreteFactor object according to the
        discretization method used.

        Examples
        --------
        >>> import numpy as np
        >>> from scipy.special import beta
        >>> from pgmpy.factors.continuous import ContinuousFactor
        >>> from pgmpy.factors.continuous import RoundingDiscretizer
        >>> def dirichlet_pdf(x, y):
        ...     return (np.power(x, 1) * np.power(y, 2)) / beta(x, y)
        >>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf)
        >>> dirichlet_factor.discretize(RoundingDiscretizer, low=1, high=2, cardinality=5)
        """
        return method(self, *args, **kwargs).get_discrete_values()

    def reduce(self, values, inplace=True):
        """
        Reduces the factor to the context of the given variable values.

        Parameters
        ----------
        values: list, array-like
            A list of tuples of the form (variable_name, variable_value).

        inplace: boolean
            If inplace=True it will modify the factor itself, else would
            return a new ContinuousFactor object.

        Returns
        -------
        ContinuousFactor or None: if inplace=True (default) returns None
                                  if inplace=False returns a new
                                  ContinuousFactor instance.

        Examples
        --------
        >>> import numpy as np
        >>> from scipy.special import beta
        >>> from pgmpy.factors.continuous import ContinuousFactor
        >>> def custom_pdf(x, y, z):
        ...     return z*(np.power(x, 1) * np.power(y, 2)) / beta(x, y)
        >>> custom_factor = ContinuousFactor(['x', 'y', 'z'], custom_pdf)
        >>> custom_factor.reduce([('y', 2)])
        >>> custom_factor.scope()
        ['x', 'z']
        >>> custom_factor.assignment(1, 3)
        24.0
        """
        phi = self if inplace else self.copy()

        # The underlying distribution handles the actual substitution.
        phi.distribution = phi.distribution.reduce(values, inplace=False)
        if not inplace:
            return phi

    def marginalize(self, variables, inplace=True):
        """
        Marginalize the factor with respect to the given variables.

        Parameters
        ----------
        variables: list, array-like
            List of variables to be marginalized out.

        inplace: boolean
            If inplace=True it will modify the factor itself, else would
            return a new ContinuousFactor instance.

        Returns
        -------
        ContinuousFactor or None: if inplace=True (default) returns None
                                  if inplace=False returns a new
                                  ContinuousFactor instance.

        Examples
        --------
        >>> from pgmpy.factors.continuous import ContinuousFactor
        >>> from scipy.stats import multivariate_normal
        >>> std_normal_pdf = lambda *x: multivariate_normal.pdf(x, [0, 0], [[1, 0], [0, 1]])
        >>> std_normal = ContinuousFactor(['x1', 'x2'], std_normal_pdf)
        >>> std_normal.marginalize(['x2'])
        >>> std_normal.scope()
        ['x1']
        """
        phi = self if inplace else self.copy()
        phi.distribution = phi.distribution.marginalize(variables, inplace=False)

        if not inplace:
            return phi

    def normalize(self, inplace=True):
        """
        Normalizes the pdf of the continuous factor so that it integrates to
        1 over all the variables.

        Parameters
        ----------
        inplace: boolean
            If inplace=True it will modify the factor itself, else would
            return a new factor.

        Returns
        -------
        ContinuousFactor or None:
             if inplace=True (default) returns None
             if inplace=False returns a new ContinuousFactor instance.

        Examples
        --------
        >>> from pgmpy.factors.continuous import ContinuousFactor
        >>> from scipy.stats import multivariate_normal
        >>> std_normal_pdf = lambda x: 2 * multivariate_normal.pdf(x, [0, 0], [[1, 0], [0, 1]])
        >>> std_normal = ContinuousFactor(['x1', 'x2'], std_normal_pdf)
        >>> std_normal.assignment(1, 1)
        0.117099663049
        >>> std_normal.normalize()
        >>> std_normal.assignment(1, 1)
        0.0585498315243
        """
        phi = self if inplace else self.copy()
        # The copy (or self) is always normalized in place; the distinction
        # is only whether `self` or a fresh factor gets modified.
        phi.distribution.normalize(inplace=True)

        if not inplace:
            return phi

    def is_valid_cpd(self):
        # Delegated to the distribution; checks integration-to-1 semantics.
        return self.distribution.is_valid_cpd()

    def _operate(self, other, operation, inplace=True):
        """
        Gives the ContinuousFactor operation (product or divide) with
        the other factor.

        Parameters
        ----------
        other: ContinuousFactor
            The ContinuousFactor to be multiplied/divided.

        operation: String
            'product' for multiplication operation and 'divide' for
            division operation.

        inplace: boolean
            If inplace=True it will modify the factor itself, else would
            return a new factor.

        Returns
        -------
        ContinuousFactor or None:
            if inplace=True (default) returns None
            if inplace=False returns a new `ContinuousFactor` instance.
        """
        if not isinstance(other, ContinuousFactor):
            # BUGFIX: build one message string.  Previously three
            # comma-separated f-strings were passed to TypeError, so the
            # exception's argument was a tuple instead of a readable
            # message.
            raise TypeError(
                "ContinuousFactor objects can only be multiplied "
                "or divided with another ContinuousFactor object. "
                f"Got {type(other)}, expected: ContinuousFactor."
            )

        phi = self if inplace else self.copy()
        phi.distribution = phi.distribution._operate(
            other=other.distribution, operation=operation, inplace=False
        )

        if not inplace:
            return phi

    def product(self, other, inplace=True):
        """
        Gives the ContinuousFactor product with the other factor.

        Parameters
        ----------
        other: ContinuousFactor
            The ContinuousFactor to be multiplied.

        Returns
        -------
        ContinuousFactor or None:
            if inplace=True (default) returns None
            if inplace=False returns a new `ContinuousFactor` instance.

        Examples
        --------
        >>> from pgmpy.factors.continuous import ContinuousFactor
        >>> from scipy.stats import multivariate_normal
        >>> sn_pdf1 = lambda x: multivariate_normal.pdf([x], [0], [[1]])
        >>> sn_pdf2 = lambda x1,x2: multivariate_normal.pdf([x1, x2], [0, 0], [[1, 0], [0, 1]])
        >>> sn1 = ContinuousFactor(['x2'], sn_pdf1)
        >>> sn2 = ContinuousFactor(['x1', 'x2'], sn_pdf2)
        >>> sn3 = sn1.product(sn2, inplace=False)
        >>> sn3.assignment(0, 0)
        0.063493635934240983
        """
        return self._operate(other, "product", inplace)

    def divide(self, other, inplace=True):
        """
        Gives the ContinuousFactor divide with the other factor.

        Parameters
        ----------
        other: ContinuousFactor
            The ContinuousFactor to be divided.

        Returns
        -------
        ContinuousFactor or None:
            if inplace=True (default) returns None
            if inplace=False returns a new `ContinuousFactor` instance.

        Raises
        ------
        ValueError
            If the divisor's scope is not a subset of the dividend's.

        Examples
        --------
        >>> from pgmpy.factors.continuous import ContinuousFactor
        >>> from scipy.stats import multivariate_normal
        >>> sn_pdf1 = lambda x: multivariate_normal.pdf([x], [0], [[1]])
        >>> sn_pdf2 = lambda x1,x2: multivariate_normal.pdf([x1, x2], [0, 0], [[1, 0], [0, 1]])
        >>> sn1 = ContinuousFactor(['x2'], sn_pdf1)
        >>> sn2 = ContinuousFactor(['x1', 'x2'], sn_pdf2)
        >>> sn4 = sn2.divide(sn1, inplace=False)
        >>> sn4.assignment(0, 0)
        0.3989422804014327
        """
        if set(other.scope()) - set(self.scope()):
            raise ValueError("Scope of divisor should be a subset of dividend")

        return self._operate(other, "divide", inplace)

    def __mul__(self, other):
        return self.product(other, inplace=False)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __truediv__(self, other):
        return self.divide(other, inplace=False)

    # Python 2 compatibility alias.
    __div__ = __truediv__
|
testbed/pgmpy__pgmpy/pgmpy/factors/continuous/LinearGaussianCPD.py
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
import numpy as np
|
| 3 |
+
import pandas as pd
|
| 4 |
+
from scipy.stats import multivariate_normal
|
| 5 |
+
|
| 6 |
+
from pgmpy.factors.base import BaseFactor
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class LinearGaussianCPD(BaseFactor):
|
| 10 |
+
r"""
|
| 11 |
+
For, X -> Y the Linear Gaussian model assumes that the mean
|
| 12 |
+
of Y is a linear function of mean of X and the variance of Y does
|
| 13 |
+
not depend on X.
|
| 14 |
+
|
| 15 |
+
For example,
|
| 16 |
+
|
| 17 |
+
.. math::
|
| 18 |
+
|
| 19 |
+
p(Y|X) = N(-2x + 0.9 ; 1)
|
| 20 |
+
|
| 21 |
+
Here, :math:`x` is the mean of the variable :math:`X`.
|
| 22 |
+
|
| 23 |
+
Let :math:`Y` be a continuous variable with continuous parents
|
| 24 |
+
:math:`X1, X2, \cdots, Xk`. We say that :math:`Y` has a linear Gaussian CPD
|
| 25 |
+
if there are parameters :math:`\beta_0, \beta_1, ..., \beta_k`
|
| 26 |
+
and :math:`\sigma_2` such that,
|
| 27 |
+
|
| 28 |
+
.. math:: p(Y |x1, x2, ..., xk) = \mathcal{N}(\beta_0 + x1*\beta_1 + ......... + xk*\beta_k ; \sigma_2)
|
| 29 |
+
|
| 30 |
+
In vector notation,
|
| 31 |
+
|
| 32 |
+
.. math:: p(Y |x) = \mathcal{N}(\beta_0 + \boldmath{β}.T * \boldmath{x} ; \sigma_2)
|
| 33 |
+
|
| 34 |
+
References
|
| 35 |
+
----------
|
| 36 |
+
.. [1] https://cedar.buffalo.edu/~srihari/CSE574/Chap8/Ch8-PGM-GaussianBNs/8.5%20GaussianBNs.pdf
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
def __init__(
|
| 40 |
+
self, variable, evidence_mean, evidence_variance, evidence=[], beta=None
|
| 41 |
+
):
|
| 42 |
+
"""
|
| 43 |
+
Parameters
|
| 44 |
+
----------
|
| 45 |
+
|
| 46 |
+
variable: any hashable python object
|
| 47 |
+
The variable whose CPD is defined.
|
| 48 |
+
|
| 49 |
+
evidence_mean: list (array-like)
|
| 50 |
+
Mean vector (numpy array) of the joint distribution, X
|
| 51 |
+
|
| 52 |
+
evidence_variance: int, float
|
| 53 |
+
The variance of the multivariate gaussian, X = ['x1', 'x2', ..., 'xn']
|
| 54 |
+
|
| 55 |
+
evidence: iterable of any hashable python objects
|
| 56 |
+
An iterable of the parents of the variable. None if there are no parents.
|
| 57 |
+
|
| 58 |
+
beta (optional): iterable of int or float
|
| 59 |
+
An iterable representing the coefficient vector of the linear equation.
|
| 60 |
+
The first term represents the constant term in the linear equation.
|
| 61 |
+
|
| 62 |
+
Examples
|
| 63 |
+
--------
|
| 64 |
+
|
| 65 |
+
# For P(Y| X1, X2, X3) = N(-2x1 + 3x2 + 7x3 + 0.2; 9.6)
|
| 66 |
+
|
| 67 |
+
>>> cpd = LinearGaussianCPD('Y', [0.2, -2, 3, 7], 9.6, ['X1', 'X2', 'X3'])
|
| 68 |
+
>>> cpd.variable
|
| 69 |
+
'Y'
|
| 70 |
+
>>> cpd.evidence
|
| 71 |
+
['x1', 'x2', 'x3']
|
| 72 |
+
>>> cpd.beta_vector
|
| 73 |
+
[0.2, -2, 3, 7]
|
| 74 |
+
|
| 75 |
+
"""
|
| 76 |
+
self.variable = variable
|
| 77 |
+
self.mean = np.array(evidence_mean)
|
| 78 |
+
self.variance = evidence_variance
|
| 79 |
+
self.evidence = evidence
|
| 80 |
+
self.sigma_yx = None
|
| 81 |
+
|
| 82 |
+
self.variables = [variable] + evidence
|
| 83 |
+
super(LinearGaussianCPD, self).__init__(
|
| 84 |
+
self.variables, pdf="gaussian", mean=self.mean, covariance=self.variance
|
| 85 |
+
)
|
| 86 |
+
|
| 87 |
+
def sum_of_product(self, xi, xj):
|
| 88 |
+
prod_xixj = xi * xj
|
| 89 |
+
return np.sum(prod_xixj)
|
| 90 |
+
|
| 91 |
+
def maximum_likelihood_estimator(self, data, states):
|
| 92 |
+
"""
|
| 93 |
+
Fit using MLE method.
|
| 94 |
+
|
| 95 |
+
Parameters
|
| 96 |
+
----------
|
| 97 |
+
data: pandas.DataFrame or 2D array
|
| 98 |
+
Dataframe of values containing samples from the conditional distribution, (Y|X)
|
| 99 |
+
and corresponding X values.
|
| 100 |
+
|
| 101 |
+
states: All the input states that are jointly gaussian.
|
| 102 |
+
|
| 103 |
+
Returns
|
| 104 |
+
-------
|
| 105 |
+
beta, variance (tuple): Returns estimated betas and the variance.
|
| 106 |
+
"""
|
| 107 |
+
x_df = pd.DataFrame(data, columns=states)
|
| 108 |
+
x_len = len(self.evidence)
|
| 109 |
+
|
| 110 |
+
sym_coefs = []
|
| 111 |
+
for i in range(0, x_len):
|
| 112 |
+
sym_coefs.append("b" + str(i + 1) + "_coef")
|
| 113 |
+
|
| 114 |
+
sum_x = x_df.sum()
|
| 115 |
+
x = [sum_x["(Y|X)"]]
|
| 116 |
+
coef_matrix = pd.DataFrame(columns=sym_coefs)
|
| 117 |
+
|
| 118 |
+
# First we compute just the coefficients of beta_1 to beta_N.
|
| 119 |
+
# Later we compute beta_0 and append it.
|
| 120 |
+
for i in range(0, x_len):
|
| 121 |
+
x.append(self.sum_of_product(x_df["(Y|X)"], x_df[self.evidence[i]]))
|
| 122 |
+
for j in range(0, x_len):
|
| 123 |
+
coef_matrix.loc[i, sym_coefs[j]] = self.sum_of_product(
|
| 124 |
+
x_df[self.evidence[i]], x_df[self.evidence[j]]
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
coef_matrix.insert(0, "b0_coef", sum_x[self.evidence].values)
|
| 128 |
+
row_1 = np.append([len(x_df)], sum_x[self.evidence].values)
|
| 129 |
+
coef_matrix.loc[-1] = row_1
|
| 130 |
+
coef_matrix.index = coef_matrix.index + 1 # shifting index
|
| 131 |
+
coef_matrix.sort_index(inplace=True)
|
| 132 |
+
|
| 133 |
+
beta_coef_matrix = np.matrix(coef_matrix.values, dtype="float")
|
| 134 |
+
coef_inv = np.linalg.inv(beta_coef_matrix)
|
| 135 |
+
beta_est = np.array(np.matmul(coef_inv, np.transpose(x)))
|
| 136 |
+
self.beta = beta_est[0]
|
| 137 |
+
|
| 138 |
+
sigma_est = 0
|
| 139 |
+
x_len_df = len(x_df)
|
| 140 |
+
for i in range(0, x_len):
|
| 141 |
+
for j in range(0, x_len):
|
| 142 |
+
sigma_est += (
|
| 143 |
+
self.beta[i + 1]
|
| 144 |
+
* self.beta[j + 1]
|
| 145 |
+
* (
|
| 146 |
+
self.sum_of_product(
|
| 147 |
+
x_df[self.evidence[i]], x_df[self.evidence[j]]
|
| 148 |
+
)
|
| 149 |
+
/ x_len_df
|
| 150 |
+
- np.mean(x_df[self.evidence[i]])
|
| 151 |
+
* np.mean(x_df[self.evidence[j]])
|
| 152 |
+
)
|
| 153 |
+
)
|
| 154 |
+
|
| 155 |
+
sigma_est = np.sqrt(
|
| 156 |
+
self.sum_of_product(x_df["(Y|X)"], x_df["(Y|X)"]) / x_len_df
|
| 157 |
+
- np.mean(x_df["(Y|X)"]) * np.mean(x_df["(Y|X)"])
|
| 158 |
+
- sigma_est
|
| 159 |
+
)
|
| 160 |
+
self.sigma_yx = sigma_est
|
| 161 |
+
return self.beta, self.sigma_yx
|
| 162 |
+
|
| 163 |
+
def fit(self, data, states, estimator=None, **kwargs):
|
| 164 |
+
"""
|
| 165 |
+
Determine βs from data
|
| 166 |
+
|
| 167 |
+
Parameters
|
| 168 |
+
----------
|
| 169 |
+
data: pandas.DataFrame
|
| 170 |
+
Dataframe containing samples from the conditional distribution, p(Y|X)
|
| 171 |
+
estimator: 'MLE' or 'MAP'
|
| 172 |
+
"""
|
| 173 |
+
if estimator == "MLE":
|
| 174 |
+
mean, variance = self.maximum_likelihood_estimator(data, states)
|
| 175 |
+
elif estimator == "MAP":
|
| 176 |
+
raise NotImplementedError(
|
| 177 |
+
"fit method has not been implemented using Maximum A-Priori (MAP)"
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
return mean, variance
|
| 181 |
+
|
| 182 |
+
@property
|
| 183 |
+
def pdf(self):
|
| 184 |
+
def _pdf(*args):
|
| 185 |
+
# The first element of args is the value of the variable on which CPD is defined
|
| 186 |
+
# and the rest of the elements give the mean values of the parent
|
| 187 |
+
# variables.
|
| 188 |
+
mean = (
|
| 189 |
+
sum([arg * coeff for (arg, coeff) in zip(args[1:], self.mean)])
|
| 190 |
+
+ self.mean[0]
|
| 191 |
+
)
|
| 192 |
+
return multivariate_normal.pdf(
|
| 193 |
+
args[0], np.array(mean), np.array([[self.variance]])
|
| 194 |
+
)
|
| 195 |
+
|
| 196 |
+
return _pdf
|
| 197 |
+
|
| 198 |
+
def copy(self):
|
| 199 |
+
"""
|
| 200 |
+
Returns a copy of the distribution.
|
| 201 |
+
|
| 202 |
+
Returns
|
| 203 |
+
-------
|
| 204 |
+
LinearGaussianCPD: copy of the distribution
|
| 205 |
+
|
| 206 |
+
Examples
|
| 207 |
+
--------
|
| 208 |
+
>>> from pgmpy.factors.continuous import LinearGaussianCPD
|
| 209 |
+
>>> cpd = LinearGaussianCPD('Y', [0.2, -2, 3, 7], 9.6, ['X1', 'X2', 'X3'])
|
| 210 |
+
>>> copy_cpd = cpd.copy()
|
| 211 |
+
>>> copy_cpd.variable
|
| 212 |
+
'Y'
|
| 213 |
+
>>> copy_cpd.evidence
|
| 214 |
+
['X1', 'X2', 'X3']
|
| 215 |
+
"""
|
| 216 |
+
copy_cpd = LinearGaussianCPD(
|
| 217 |
+
self.variable, self.beta, self.variance, list(self.evidence)
|
| 218 |
+
)
|
| 219 |
+
|
| 220 |
+
return copy_cpd
|
| 221 |
+
|
| 222 |
+
def __str__(self):
    """Return a human-readable rendering of the conditional distribution."""
    mean = self.mean.round(3)
    variance = round(self.variance, 3)
    if self.evidence and list(self.mean):
        # P(Y| X1, X2, X3) = N(-2*X1_mu + 3*X2_mu + 7*X3_mu; 0.2)
        parents = ", ".join(str(var) for var in self.evidence)
        terms = [
            f"{coeff}*{parent}"
            for coeff, parent in zip(mean[1:], self.evidence)
        ]
        mu = " + ".join(terms)
        return f"P({self.variable} | {parents}) = N({mu} + {mean[0]}; {variance})"
    # P(X) = N(1, 4)
    return f"P({str(self.variable)}) = N({str(mean[0])}; {str(variance)})"
|
| 243 |
+
|
| 244 |
+
def __repr__(self):
|
| 245 |
+
str_repr = self.__str__()
|
| 246 |
+
return f"<LinearGaussianCPD: {str_repr} at {hex(id(self))}"
|
testbed/pgmpy__pgmpy/pgmpy/factors/continuous/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pgmpy.factors.distributions.CanonicalDistribution import CanonicalDistribution
from .ContinuousFactor import ContinuousFactor
from .LinearGaussianCPD import LinearGaussianCPD
from .discretize import BaseDiscretizer, RoundingDiscretizer, UnbiasedDiscretizer

# Public API of pgmpy.factors.continuous.
__all__ = [
    "CanonicalDistribution",
    "ContinuousFactor",
    # A missing comma previously fused the next two entries into the single
    # bogus name "LinearGaussianCPDBaseDiscretizer" (implicit string
    # concatenation), breaking `from pgmpy.factors.continuous import *`.
    "LinearGaussianCPD",
    "BaseDiscretizer",
    "RoundingDiscretizer",
    "UnbiasedDiscretizer",
]
|
testbed/pgmpy__pgmpy/pgmpy/factors/continuous/discretize.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import ABC, abstractmethod
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from scipy import integrate
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class BaseDiscretizer(ABC):
    """
    Abstract base class for the discretizer classes in pgmpy.

    A discretizer converts a continuous random variable distribution
    into a set of discrete probability masses over a fixed range.

    Parameters
    ----------
    factor: A ContinuousFactor object
        the continuous node or factor representing the distribution
        to be discretized.

    low, high: float
        the range over which the function will be discretized.

    cardinality: int
        the number of states required in the discretized output.

    Examples
    --------
    >>> from scipy.stats import norm
    >>> from pgmpy.factors.continuous import ContinuousFactor, BaseDiscretizer
    >>> normal = ContinuousFactor(['x'], norm(0, 1).pdf)
    >>> class ChildDiscretizer(BaseDiscretizer):
    ...     def get_discrete_values(self):
    ...         pass
    >>> discretizer = ChildDiscretizer(normal, -3, 3, 10)
    >>> discretizer.factor
    <pgmpy.factors.continuous.ContinuousFactor.ContinuousFactor object at 0x1316f4da0>
    >>> discretizer.cardinality
    10
    >>> discretizer.get_labels()
    ['x=-3.0', 'x=-2.4', 'x=-1.8', 'x=-1.2', 'x=-0.6', 'x=0.0', 'x=0.6', 'x=1.2', 'x=1.8', 'x=2.4']

    """

    def __init__(self, factor, low, high, cardinality):
        self.factor = factor
        self.low = low
        self.high = high
        self.cardinality = cardinality

    @abstractmethod
    def get_discrete_values(self):
        """
        Implement the algorithm to discretize the given continuous
        distribution.

        Must be overridden by every subclass of BaseDiscretizer.

        Returns
        -------
        A list of discrete values or a DiscreteFactor object.
        """
        pass

    def get_labels(self):
        """
        Return string labels for the points at which the discretization
        method computes the probability masses.

        Unless a subclass overrides this method, the labels correspond to
        the points [low, low+step, low+2*step, ..., high-step].

        Examples
        --------
        >>> from pgmpy.factors.continuous import ContinuousFactor
        >>> from pgmpy.discretize import BaseDiscretizer
        >>> class ChildDiscretizer(BaseDiscretizer):
        ...     def get_discrete_values(self):
        ...         pass
        >>> from scipy.stats import norm
        >>> node = ContinuousFactor(['x'], norm(0).pdf)
        >>> child = ChildDiscretizer(node, -5, 5, 20)
        >>> child.get_labels()
        ['x=-5.0', 'x=-4.5', 'x=-4.0', 'x=-3.5', 'x=-3.0', 'x=-2.5',
        'x=-2.0', 'x=-1.5', 'x=-1.0', 'x=-0.5', 'x=0.0', 'x=0.5', 'x=1.0',
        'x=1.5', 'x=2.0', 'x=2.5', 'x=3.0', 'x=3.5', 'x=4.0', 'x=4.5']

        """
        step = (self.high - self.low) / self.cardinality
        points = np.round(np.arange(self.low, self.high, step), 3)
        return [f"x={point}" for point in points]
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class RoundingDiscretizer(BaseDiscretizer):
    """
    Discretize the given continuous distribution using the rounding
    method.

    The probability mass assigned to each point is

        cdf(x + step/2) - cdf(x),            for x = low
        cdf(x + step/2) - cdf(x - step/2),   for low < x <= high

    where cdf is the cumulative density function of the distribution
    and step = (high - low) / cardinality.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.continuous import ContinuousFactor,RoundingDiscretizer
    >>> std_normal_pdf = lambda x : np.exp(-x*x/2) / (np.sqrt(2*np.pi))
    >>> std_normal = ContinuousFactor(['x'],std_normal_pdf)
    >>> std_normal.discretize(RoundingDiscretizer, low=-3, high=3,
    ...                       cardinality=12)
    [0.001629865203424451, 0.009244709419989363, 0.027834684208773178,
    0.065590616803038182, 0.120977578710013, 0.17466632194020804,
    0.19741265136584729, 0.17466632194020937, 0.12097757871001302,
    0.065590616803036905, 0.027834684208772664, 0.0092447094199902269]
    """

    def get_discrete_values(self):
        cdf = self.factor.cdf
        step = (self.high - self.low) / self.cardinality

        # First point (x = low): mass between low and low + step/2.
        masses = [
            cdf([(-np.inf, self.low + step / 2)]) - cdf([(-np.inf, self.low)])
        ]

        # Remaining points [low+step, ..., high-step]: mass in the
        # symmetric window of width `step` centered at each point.
        interior = np.linspace(
            self.low + step, self.high - step, self.cardinality - 1
        )
        masses += [
            cdf([(-np.inf, point + step / 2)])
            - cdf([(-np.inf, point - step / 2)])
            for point in interior
        ]

        return masses
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class UnbiasedDiscretizer(BaseDiscretizer):
    """
    This class uses the unbiased method for discretizing the
    given continuous distribution.

    The unbiased method for discretization is the matching of the
    first moment method. It involves calculating the first order
    limited moment of the distribution which is done by the _lim_moment
    method.

    For this method,

    The probability mass is,
    (E(x) - E(x + step))/step + 1 - cdf(x), for x = low

    (2 * E(x) - E(x - step) - E(x + step))/step, for low < x < high

    (E(x) - E(x - step))/step - 1 + cdf(x), for x = high

    where, E(x) is the first limiting moment of the distribution
    about the point x, cdf is the cumulative density function
    and step = (high-low)/cardinality.

    References
    ----------
    Klugman, S. A., Panjer, H. H. and Willmot, G. E.,
    Loss Models, From Data to Decisions, Fourth Edition,
    Wiley, section 9.6.5.2 (Method of local moment matching) and
    exercise 9.41.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.continuous import ContinuousFactor,UnbiasedDiscretizer
    # exponential distribution with rate = 2
    >>> exp_pdf = lambda x: 2*np.exp(-2*x) if x>=0 else 0
    >>> exp_node = ContinuousFactor(['x'],exp_pdf)
    >>> exp_node.discretize(UnbiasedDiscretizer, low=0, high=5, cardinality=10)
    [0.39627368905806137, 0.4049838434034298, 0.13331784003148325,
    0.043887287876647259, 0.014447413395300212, 0.0047559685431339703,
    0.0015656350182896128, 0.00051540201980112557, 0.00016965346326140994,
    3.7867260839208328e-05]

    """

    def get_discrete_values(self):
        """
        Compute the probability masses by local (first) moment matching.

        Note: unlike the rounding method, `step` here divides the range
        into `cardinality - 1` intervals so that both endpoints receive
        a mass of their own.
        """
        lev = self._lim_moment
        step = (self.high - self.low) / (self.cardinality - 1)

        # for x=[low]
        discrete_values = [
            (lev(self.low) - lev(self.low + step)) / step
            + 1
            - self.factor.cdf([(-np.inf, self.low)])
        ]

        # for x=[low+step, low+2*step, ........., high-step]
        points = np.linspace(self.low + step, self.high - step, self.cardinality - 2)
        discrete_values.extend(
            [(2 * lev(i) - lev(i - step) - lev(i + step)) / step for i in points]
        )

        # for x=[high]
        discrete_values.append(
            (lev(self.high) - lev(self.high - step)) / step
            - 1
            + self.factor.cdf([(-np.inf, self.high)])
        )

        return discrete_values

    def _lim_moment(self, u, order=1):
        """
        This method calculates the kth order limiting moment of
        the distribution. It is given by -

        E(u) = Integral (-inf to u) [ (x^k)*pdf(x) dx ] + (u^k)(1-cdf(u))

        where, pdf is the probability density function and cdf is the
        cumulative density function of the distribution.

        Reference
        ---------
        Klugman, S. A., Panjer, H. H. and Willmot, G. E.,
        Loss Models, From Data to Decisions, Fourth Edition,
        Wiley, definition 3.5 and equation 3.8.

        Parameters
        ----------
        u: float
            The point at which the moment is to be calculated.

        order: int
            The order of the moment, default is first order.
        """

        def fun(x):
            # Integrand x^k * pdf(x) for the truncated expectation.
            return np.power(x, order) * self.factor.pdf(x)

        # quad returns (value, abs-error); only the value is needed.
        return integrate.quad(fun, -np.inf, u)[0] + np.power(u, order) * (
            1 - self.factor.cdf([(-np.inf, u)])
        )

    def get_labels(self):
        # Unlike the base class, labels here include BOTH endpoints
        # (linspace over `cardinality` points), matching the masses
        # produced by get_discrete_values above.
        labels = list(
            f"x={str(i)}"
            for i in np.round(np.linspace(self.low, self.high, self.cardinality), 3)
        )
        return labels
|
testbed/pgmpy__pgmpy/pgmpy/factors/discrete/CPD.py
ADDED
|
@@ -0,0 +1,677 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Contains the different formats of CPDs used in PGM"""
|
| 3 |
+
import csv
|
| 4 |
+
import numbers
|
| 5 |
+
from itertools import chain, product
|
| 6 |
+
from shutil import get_terminal_size
|
| 7 |
+
from warnings import warn
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
from pgmpy import config
|
| 13 |
+
from pgmpy.extern import tabulate
|
| 14 |
+
from pgmpy.factors.discrete import DiscreteFactor
|
| 15 |
+
from pgmpy.utils import compat_fns
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class TabularCPD(DiscreteFactor):
|
| 19 |
+
"""
|
| 20 |
+
Defines the conditional probability distribution table (CPD table)
|
| 21 |
+
|
| 22 |
+
Parameters
|
| 23 |
+
----------
|
| 24 |
+
variable: int, string (any hashable python object)
|
| 25 |
+
The variable whose CPD is defined.
|
| 26 |
+
|
| 27 |
+
variable_card: integer
|
| 28 |
+
Cardinality/no. of states of `variable`
|
| 29 |
+
|
| 30 |
+
values: 2D array, 2D list or 2D tuple
|
| 31 |
+
Values for the CPD table. Please refer the example for the
|
| 32 |
+
exact format needed.
|
| 33 |
+
|
| 34 |
+
evidence: array-like
|
| 35 |
+
List of variables in evidences(if any) w.r.t. which CPD is defined.
|
| 36 |
+
|
| 37 |
+
evidence_card: array-like
|
| 38 |
+
cardinality/no. of states of variables in `evidence`(if any)
|
| 39 |
+
|
| 40 |
+
Examples
|
| 41 |
+
--------
|
| 42 |
+
For a distribution of P(grade|diff, intel)
|
| 43 |
+
|
| 44 |
+
+---------+-------------------------+------------------------+
|
| 45 |
+
|diff: | easy | hard |
|
| 46 |
+
+---------+------+--------+---------+------+--------+--------+
|
| 47 |
+
|aptitude:| low | medium | high | low | medium | high |
|
| 48 |
+
+---------+------+--------+---------+------+--------+--------+
|
| 49 |
+
|gradeA | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
|
| 50 |
+
+---------+------+--------+---------+------+--------+--------+
|
| 51 |
+
|gradeB | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
|
| 52 |
+
+---------+------+--------+---------+------+--------+--------+
|
| 53 |
+
|gradeC | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 |
|
| 54 |
+
+---------+------+--------+---------+------+--------+--------+
|
| 55 |
+
|
| 56 |
+
values should be
|
| 57 |
+
[[0.1,0.1,0.1,0.1,0.1,0.1],
|
| 58 |
+
[0.1,0.1,0.1,0.1,0.1,0.1],
|
| 59 |
+
[0.8,0.8,0.8,0.8,0.8,0.8]]
|
| 60 |
+
|
| 61 |
+
>>> cpd = TabularCPD('grade',3,[[0.1,0.1,0.1,0.1,0.1,0.1],
|
| 62 |
+
... [0.1,0.1,0.1,0.1,0.1,0.1],
|
| 63 |
+
... [0.8,0.8,0.8,0.8,0.8,0.8]],
|
| 64 |
+
... evidence=['diff', 'intel'], evidence_card=[2,3])
|
| 65 |
+
>>> print(cpd)
|
| 66 |
+
+---------+---------+---------+---------+---------+---------+---------+
|
| 67 |
+
| diff | diff_0 | diff_0 | diff_0 | diff_1 | diff_1 | diff_1 |
|
| 68 |
+
+---------+---------+---------+---------+---------+---------+---------+
|
| 69 |
+
| intel | intel_0 | intel_1 | intel_2 | intel_0 | intel_1 | intel_2 |
|
| 70 |
+
+---------+---------+---------+---------+---------+---------+---------+
|
| 71 |
+
| grade_0 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
|
| 72 |
+
+---------+---------+---------+---------+---------+---------+---------+
|
| 73 |
+
| grade_1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
|
| 74 |
+
+---------+---------+---------+---------+---------+---------+---------+
|
| 75 |
+
| grade_2 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 |
|
| 76 |
+
+---------+---------+---------+---------+---------+---------+---------+
|
| 77 |
+
>>> cpd.values
|
| 78 |
+
array([[[ 0.1, 0.1, 0.1],
|
| 79 |
+
[ 0.1, 0.1, 0.1]],
|
| 80 |
+
[[ 0.1, 0.1, 0.1],
|
| 81 |
+
[ 0.1, 0.1, 0.1]],
|
| 82 |
+
[[ 0.8, 0.8, 0.8],
|
| 83 |
+
[ 0.8, 0.8, 0.8]]])
|
| 84 |
+
>>> cpd.variables
|
| 85 |
+
['grade', 'diff', 'intel']
|
| 86 |
+
>>> cpd.cardinality
|
| 87 |
+
array([3, 2, 3])
|
| 88 |
+
>>> cpd.variable
|
| 89 |
+
'grade'
|
| 90 |
+
>>> cpd.variable_card
|
| 91 |
+
3
|
| 92 |
+
"""
|
| 93 |
+
|
| 94 |
+
def __init__(
    self,
    variable,
    variable_card,
    values,
    evidence=None,
    evidence_card=None,
    state_names={},
):
    """
    Validate the arguments and initialize the CPD table.

    Raises TypeError/ValueError for malformed cardinalities, evidence,
    values shape, or state_names; otherwise delegates storage to the
    DiscreteFactor base class with the flattened values.

    NOTE(review): `state_names={}` is a mutable default argument; it is
    only read and passed through here, but confirm no downstream code
    mutates it in place.
    """
    self.variable = variable
    self.variable_card = None

    # The CPD's own variable always comes first; evidence follows.
    variables = [variable]

    if not isinstance(variable_card, numbers.Integral):
        raise TypeError("Event cardinality must be an integer")
    self.variable_card = variable_card

    cardinality = [variable_card]
    if evidence_card is not None:
        # A bare number (rather than a list of numbers) is rejected.
        if isinstance(evidence_card, numbers.Real):
            raise TypeError("Evidence card must be a list of numbers")
        cardinality.extend(evidence_card)

    if evidence is not None:
        if isinstance(evidence, str):
            raise TypeError("Evidence must be list, tuple or array of strings.")
        variables.extend(evidence)
        if not len(evidence_card) == len(evidence):
            raise ValueError(
                "Length of evidence_card doesn't match length of evidence"
            )

    # Convert values to the configured backend (numpy array or torch
    # tensor on the configured device/dtype).
    if config.BACKEND == "numpy":
        values = np.array(values, dtype=config.get_dtype())
    else:
        values = (
            torch.Tensor(values).type(config.get_dtype()).to(config.get_device())
        )

    if values.ndim != 2:
        raise TypeError("Values must be a 2D list/array")

    # Expected shape: one row per variable state, one column per
    # combination of evidence states (a single column with no evidence).
    if evidence is None:
        expected_cpd_shape = (variable_card, 1)
    else:
        expected_cpd_shape = (variable_card, np.prod(evidence_card))
    if values.shape != expected_cpd_shape:
        raise ValueError(
            f"values must be of shape {expected_cpd_shape}. Got shape: {values.shape}"
        )

    if not isinstance(state_names, dict):
        raise ValueError(
            f"state_names must be of type dict. Got {type(state_names)}"
        )

    super(TabularCPD, self).__init__(
        variables, cardinality, values.flatten(), state_names=state_names
    )
|
| 154 |
+
|
| 155 |
+
def __repr__(self):
|
| 156 |
+
var_str = f"<TabularCPD representing P({self.variable}:{self.variable_card}"
|
| 157 |
+
|
| 158 |
+
evidence = self.variables[1:]
|
| 159 |
+
evidence_card = self.cardinality[1:]
|
| 160 |
+
if evidence:
|
| 161 |
+
evidence_str = " | " + ", ".join(
|
| 162 |
+
[f"{var}:{card}" for var, card in zip(evidence, evidence_card)]
|
| 163 |
+
)
|
| 164 |
+
else:
|
| 165 |
+
evidence_str = ""
|
| 166 |
+
|
| 167 |
+
return var_str + evidence_str + f") at {hex(id(self))}>"
|
| 168 |
+
|
| 169 |
+
def get_values(self):
    """
    Return the CPD values as a 2-D array: one row per state of the
    variable, one column per combination of evidence states. Parent
    order matches the order given in evidence.

    Examples
    --------
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> cpd = TabularCPD('grade', 3, [[0.1, 0.1],
    ...                               [0.1, 0.1],
    ...                               [0.8, 0.8]],
    ...                  evidence='evi1', evidence_card=2)
    >>> cpd.get_values()
    array([[ 0.1, 0.1],
           [ 0.1, 0.1],
           [ 0.8, 0.8]])
    """
    if self.variable in self.variables:
        n_columns = np.prod(self.cardinality[1:])
        return self.values.reshape(tuple([self.cardinality[0], n_columns]))
    # Variable already marginalized out: collapse to a single column.
    return self.values.reshape(tuple([np.prod(self.cardinality), 1]))
|
| 192 |
+
|
| 193 |
+
def __str__(self):
    """Render the CPD as a plain-text table using the 'grid' format."""
    return self._make_table_str(tablefmt="grid")
|
| 195 |
+
|
| 196 |
+
def _str(self, phi_or_p="p", tablefmt="fancy_grid"):
    """
    Delegate table rendering to the parent DiscreteFactor class.

    Bug fix: the super() arguments were previously inverted
    (``super(self, TabularCPD)``), which raised a TypeError whenever
    this method was called; the correct order is
    ``super(TabularCPD, self)``.
    """
    return super(TabularCPD, self)._str(phi_or_p, tablefmt)
|
| 198 |
+
|
| 199 |
+
def _make_table_str(
    self, tablefmt="fancy_grid", print_state_names=True, return_list=False
):
    """
    Build the tabular representation of the CPD.

    Parameters
    ----------
    tablefmt: str
        Table format understood by `tabulate`.
    print_state_names: bool
        If True and state names are available, headers show
        "var(state)"; otherwise they show "var_index".
    return_list: bool
        If True, return the raw rows (headers + labeled value rows)
        instead of a formatted string.

    Returns
    -------
    str or list: formatted table string, truncated to the terminal
    width, or the raw row list when return_list=True.
    """
    headers_list = []

    # Build column headers: one header row per evidence variable, each
    # cell naming the evidence state for that column of values.
    evidence = self.variables[1:]
    evidence_card = self.cardinality[1:]
    if evidence:
        # Cartesian product of evidence state indices, one tuple per column.
        col_indexes = np.array(list(product(*[range(i) for i in evidence_card])))
        if self.state_names and print_state_names:
            for i in range(len(evidence_card)):
                column_header = [str(evidence[i])] + [
                    "{var}({state})".format(
                        var=evidence[i], state=self.state_names[evidence[i]][d]
                    )
                    for d in col_indexes.T[i]
                ]
                headers_list.append(column_header)
        else:
            for i in range(len(evidence_card)):
                column_header = [str(evidence[i])] + [
                    f"{evidence[i]}_{d}" for d in col_indexes.T[i]
                ]
                headers_list.append(column_header)

    # Build row headers: one label per state of the CPD's own variable.
    if self.state_names and print_state_names:
        variable_array = [
            [
                "{var}({state})".format(
                    var=self.variable, state=self.state_names[self.variable][i]
                )
                for i in range(self.variable_card)
            ]
        ]
    else:
        variable_array = [
            [f"{self.variable}_{i}" for i in range(self.variable_card)]
        ]
    # Stack with data: prepend the row label to each row of values.
    labeled_rows = np.hstack(
        (np.array(variable_array).T, compat_fns.to_numpy(self.get_values()))
    ).tolist()

    if return_list:
        return headers_list + labeled_rows

    # No support for multi-headers in tabulate, so header rows are passed
    # as ordinary data rows.
    cdf_str = tabulate(headers_list + labeled_rows, tablefmt=tablefmt)

    cdf_str = self._truncate_strtable(cdf_str)

    return cdf_str
|
| 253 |
+
|
| 254 |
+
def _truncate_strtable(self, cdf_str):
    """
    Truncate a rendered ASCII table horizontally so it fits the current
    terminal width, replacing the middle columns with an ellipsis.

    Parameters
    ----------
    cdf_str: str
        The full table string (rows separated by newlines, columns
        delimited by '+' in the border rows).

    Returns
    -------
    str: the (possibly truncated) table string.
    """
    terminal_width, terminal_height = get_terminal_size()

    list_rows_str = cdf_str.split("\n")

    table_width, table_height = len(list_rows_str[0]), len(list_rows_str)

    # Positions of column separators ('+') in the top border row.
    colstr_i = np.array(
        [pos for pos, char in enumerate(list_rows_str[0]) if char == "+"]
    )

    if table_width > terminal_width:
        # Keep roughly half the terminal width on each side of the cut
        # (-3 leaves room for the " ... " joiner).
        half_width = terminal_width // 2 - 3

        # Last column boundary that fits on the left, first that fits on
        # the right.
        left_i = colstr_i[colstr_i < half_width][-1]
        right_i = colstr_i[(table_width - colstr_i) < half_width][0]

        new_cdf_str = []
        for temp_row_str in list_rows_str:
            left = temp_row_str[: left_i + 1]
            right = temp_row_str[right_i:]
            # Border rows get dashes; data rows get an ellipsis.
            if temp_row_str[left_i] == "+":
                joiner = "-----"
            else:
                joiner = " ... "
            new_cdf_str.append(left + joiner + right)

        cdf_str = "\n".join(new_cdf_str)

    # TODO: vertical limiter
    # if table_height > terminal_height:
    #     half_height = terminal_height // 2

    return cdf_str
|
| 288 |
+
|
| 289 |
+
def to_csv(self, filename):
    """
    Export the CPD table (header rows plus labeled value rows) to a
    CSV file.

    Parameters
    ----------
    filename: str
        Path of the CSV file to write.

    Examples
    --------
    >>> from pgmpy.utils import get_example_model
    >>> model = get_example_model("alarm")
    >>> cpd = model.get_cpds("SAO2")
    >>> cpd.to_csv(filename="sao2.cs")
    """
    rows = self._make_table_str(tablefmt="grid", return_list=True)
    with open(filename, "w") as csv_file:
        csv.writer(csv_file).writerows(rows)
|
| 303 |
+
|
| 304 |
+
def copy(self):
    """
    Returns a copy of the `TabularCPD` object.

    Examples
    --------
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> cpd = TabularCPD('grade', 2,
    ...                  [[0.7, 0.6, 0.6, 0.2],[0.3, 0.4, 0.4, 0.8]],
    ...                  ['intel', 'diff'], [2, 2])
    >>> copy = cpd.copy()
    >>> copy.variable
    'grade'
    >>> copy.variable_card
    2
    >>> copy.evidence
    ['intel', 'diff']
    >>> copy.values
    array([[[ 0.7, 0.6],
            [ 0.6, 0.2]],
           [[ 0.3, 0.4],
            [ 0.4, 0.8]]])
    """
    # The first variable is the CPD's own; any remaining variables are
    # evidence. With no evidence, both evidence args are None.
    evidence = self.variables[1:] if len(self.variables) > 1 else None
    evidence_card = self.cardinality[1:] if len(self.variables) > 1 else None
    # Values and state_names are copied so the new CPD shares no
    # mutable state with this one.
    return TabularCPD(
        self.variable,
        self.variable_card,
        compat_fns.copy(self.get_values()),
        evidence,
        evidence_card,
        state_names=self.state_names.copy(),
    )
|
| 337 |
+
|
| 338 |
+
def normalize(self, inplace=True):
    """
    Normalize the CPD table so that every column sums to 1, preserving
    the proportion between states.

    Parameters
    ----------
    inplace: boolean
        If inplace=True it will modify the CPD itself, else would return
        a new CPD

    Examples
    --------
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> cpd_table = TabularCPD('grade', 2,
    ...                        [[0.7, 0.2, 0.6, 0.2],[0.4, 0.4, 0.4, 0.8]],
    ...                        ['intel', 'diff'], [2, 2])
    >>> cpd_table.normalize()
    >>> cpd_table.get_values()
    array([[ 0.63636364, 0.33333333, 0.6 , 0.2 ],
           [ 0.36363636, 0.66666667, 0.4 , 0.8 ]])
    """
    target = self if inplace else self.copy()
    table = target.get_values()
    # Divide each column by its sum, then restore the n-D layout.
    target.values = (table / table.sum(axis=0)).reshape(tuple(target.cardinality))
    if not inplace:
        return target
|
| 367 |
+
|
| 368 |
+
def marginalize(self, variables, inplace=True):
    """
    Sums the given evidence variables out of the CPD and renormalizes the
    result; the marginalized variables no longer appear in the CPD.

    Parameters
    ----------
    variables: list, array-like
        List of (evidence) variables to marginalize out. The variable the
        CPD is defined on cannot be marginalized.

    inplace: boolean
        If inplace=True the CPD itself is modified, otherwise a new CPD is
        returned.

    Examples
    --------
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> cpd_table = TabularCPD('grade', 2,
    ...                        [[0.7, 0.6, 0.6, 0.2],[0.3, 0.4, 0.4, 0.8]],
    ...                        ['intel', 'diff'], [2, 2])
    >>> cpd_table.marginalize(['diff'])
    >>> cpd_table.get_values()
    array([[ 0.65,  0.4 ],
           [ 0.35,  0.6 ]])
    """
    # Guard clause: the conditioned variable itself must survive.
    if self.variable in variables:
        raise ValueError(
            "Marginalization not allowed on the variable on which CPD is defined"
        )

    target = self.copy() if not inplace else self

    # Delegate the actual summing-out to DiscreteFactor, then restore the
    # "columns sum to one" CPD invariant.
    super(TabularCPD, target).marginalize(variables)
    target.normalize()

    if not inplace:
        return target
|
| 406 |
+
|
| 407 |
+
def reduce(self, values, inplace=True, show_warnings=True):
    """
    Reduces the CPD to the context of the given evidence states. Each
    reduced variable is fixed to the specified state and disappears from
    the CPD; the remaining table is renormalized.

    Parameters
    ----------
    values: list, array-like
        A list of tuples of the form (variable_name, variable_state).

    inplace: boolean
        If inplace=True the CPD itself is modified, otherwise a new CPD is
        returned.

    show_warnings: boolean
        Forwarded to ``DiscreteFactor.reduce``.

    Examples
    --------
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> cpd_table = TabularCPD('grade', 2,
    ...                        [[0.7, 0.6, 0.6, 0.2],[0.3, 0.4, 0.4, 0.8]],
    ...                        ['intel', 'diff'], [2, 2])
    >>> cpd_table.reduce([('diff', 0)])
    >>> cpd_table.get_values()
    array([[ 0.7,  0.6],
           [ 0.3,  0.4]])
    """
    # The variable the CPD is defined on may never be reduced away.
    reduced_names = {pair[0] for pair in values}
    if self.variable in reduced_names:
        raise ValueError(
            "Reduce not allowed on the variable on which CPD is defined"
        )

    target = self.copy() if not inplace else self

    # DiscreteFactor does the slicing; renormalize to keep columns summing
    # to one.
    super(TabularCPD, target).reduce(values, show_warnings=show_warnings)
    target.normalize()

    if not inplace:
        return target
|
| 445 |
+
|
| 446 |
+
def to_factor(self):
    """
    Returns an equivalent DiscreteFactor with the same variables,
    cardinality and values as this CPD. Factors do not distinguish
    conditional from joint distributions, so the evidence information is
    lost.

    Examples
    --------
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> cpd = TabularCPD('grade', 3, [[0.1, 0.1],
    ...                               [0.1, 0.1],
    ...                               [0.8, 0.8]],
    ...                  evidence='evi1', evidence_card=2)
    >>> factor = cpd.to_factor()
    >>> factor
    <DiscreteFactor representing phi(grade:3, evi1:2) at 0x7f847a4f2d68>
    """
    # Bypass DiscreteFactor.__init__ and transplant shallow copies of the
    # state directly — avoids re-validating / re-converting the values.
    factor = DiscreteFactor.__new__(DiscreteFactor)
    factor.variables = list(self.variables)
    factor.cardinality = self.cardinality.copy()
    factor.values = compat_fns.copy(self.values)
    factor.state_names = dict(self.state_names)
    factor.name_to_no = dict(self.name_to_no)
    factor.no_to_name = dict(self.no_to_name)
    return factor
|
| 471 |
+
|
| 472 |
+
def reorder_parents(self, new_order, inplace=True):
    """
    Reorders the evidence/parent variables of the CPD to `new_order`.

    Parameters
    ----------
    new_order: list
        New ordering of the evidence variables. Must be a permutation of
        the current evidence, i.e. of ``self.variables[1:]``.

    inplace: boolean
        If inplace == True it will modify the CPD itself
        otherwise new value will be returned without affecting old values

    Returns
    -------
    The 2-D value array (variable states x evidence configurations) in
    the new ordering. When `new_order` equals the current ordering, a
    warning is issued and the current values are returned unchanged.
    """
    # Valid only if there is evidence at all and new_order is exactly the
    # current evidence set (nothing missing, nothing extra).
    if (
        len(self.variables) <= 1
        or (set(new_order) - set(self.variables))
        or (set(self.variables[1:]) - set(new_order))
    ):
        raise ValueError("New order either has missing or extra arguments")
    else:
        if new_order != self.variables[1:]:
            evidence = self.variables[1:]
            evidence_card = self.cardinality[1:]
            card_map = dict(zip(evidence, evidence_card))
            old_pos_map = dict(zip(evidence, range(len(evidence))))
            # Axis 0 (the variable itself) stays in place; the evidence
            # axes are permuted to follow new_order (+1 offset for axis 0).
            trans_ord = [0] + [(old_pos_map[letter] + 1) for letter in new_order]
            new_values = compat_fns.transpose(self.values, tuple(trans_ord))

            if inplace:
                variables = [self.variables[0]] + new_order
                cardinality = [self.variable_card] + [
                    card_map[var] for var in new_order
                ]
                # Re-initialize through the parent class so variables,
                # cardinality and values are rebuilt consistently.
                super(TabularCPD, self).__init__(
                    variables, cardinality, new_values.flatten()
                )
                return self.get_values()
            else:
                # Collapse the permuted evidence axes into a single column
                # axis without touching self.
                return new_values.reshape(
                    tuple(
                        [
                            self.cardinality[0],
                            np.prod([card_map[var] for var in new_order]),
                        ]
                    )
                )
        else:
            warn("Same ordering provided as current")
            return self.get_values()
|
| 592 |
+
|
| 593 |
+
def get_evidence(self):
    """
    Returns the evidence (parent) variables of the CPD in reverse order
    of ``self.variables[1:]``.
    """
    # variables[0] is the conditioned variable; the rest is the evidence.
    return list(reversed(self.variables[1:]))
|
| 598 |
+
|
| 599 |
+
@staticmethod
def get_random(
    variable, evidence=None, cardinality=None, state_names={}, seed=None
):
    """
    Generates a TabularCPD instance with random values on `variable` with
    parents/evidence `evidence` with cardinality/number of states as given
    in `cardinality`.

    Parameters
    ----------
    variable: str, int or any hashable python object.
        The variable on which to define the TabularCPD.

    evidence: list, array-like
        A list of variable names which are the parents/evidence of `variable`.

    cardinality: dict (default: None)
        A dict of the form {var_name: card} specifying the number of states/
        cardinality of each of the variables. If None, assigns each variable
        2 states.

    state_names: dict (default: {})
        A dict of the form {var_name: list of states} to specify the state names
        for the variables in the CPD. If state_names={}, integral state names
        starting from 0 are assigned. The argument is only read, never mutated.

    seed: int (default: None)
        Seed for the random number generator. Pass a fixed value to get a
        reproducible CPD.

    Returns
    -------
    Random CPD: pgmpy.factors.discrete.TabularCPD
        A TabularCPD object on `variable` with `evidence` as evidence with random values.

    Examples
    --------
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> TabularCPD.get_random(variable='A', evidence=['B', 'C'],
    ...                       cardinality={'A': 3, 'B': 2, 'C': 4})
    <TabularCPD representing P(A:3 | B:2, C:4) at 0x7f95e22b8040>
    >>> TabularCPD.get_random(variable='A', evidence=['B', 'C'],
    ...                       cardinality={'A': 2, 'B': 2, 'C': 2},
    ...                       state_names={'A': ['a1', 'a2'],
    ...                                    'B': ['b1', 'b2'],
    ...                                    'C': ['c1', 'c2']})
    <TabularCPD representing P(A:2 | B:2, C:2) at 0x7f95e22b8040>
    """
    generator = np.random.default_rng(seed=seed)

    if evidence is None:
        evidence = []

    if cardinality is None:
        # Default every variable (node and parents alike) to binary.
        cardinality = {var: 2 for var in chain([variable], evidence)}
    else:
        # Every involved variable must have an explicit cardinality.
        for var in chain([variable], evidence):
            if var not in cardinality.keys():
                raise ValueError(f"Cardinality for variable: {var} not specified.")

    if len(evidence) == 0:
        # No parents: a single column, normalized to sum to 1.
        values = generator.random((cardinality[variable], 1))
        values = values / np.sum(values, axis=0)
        node_cpd = TabularCPD(
            variable=variable,
            variable_card=cardinality[variable],
            values=values,
            state_names=state_names,
        )
    else:
        # One normalized column per joint configuration of the parents.
        parent_card = [cardinality[var] for var in evidence]
        values = generator.random((cardinality[variable], np.prod(parent_card)))
        values = values / np.sum(values, axis=0)
        node_cpd = TabularCPD(
            variable=variable,
            variable_card=cardinality[variable],
            values=values,
            evidence=evidence,
            evidence_card=parent_card,
            state_names=state_names,
        )

    return node_cpd
|
testbed/pgmpy__pgmpy/pgmpy/factors/discrete/DiscreteFactor.py
ADDED
|
@@ -0,0 +1,1043 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import namedtuple
|
| 2 |
+
from itertools import product
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pandas as pd
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from pgmpy import config
|
| 9 |
+
from pgmpy.extern import tabulate
|
| 10 |
+
from pgmpy.factors.base import BaseFactor
|
| 11 |
+
from pgmpy.global_vars import logger
|
| 12 |
+
from pgmpy.utils import StateNameMixin, compat_fns
|
| 13 |
+
|
| 14 |
+
State = namedtuple("State", ["var", "state"])
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class DiscreteFactor(BaseFactor, StateNameMixin):
|
| 18 |
+
"""
|
| 19 |
+
Initialize a `DiscreteFactor` class.
|
| 20 |
+
|
| 21 |
+
Defined above, we have the following mapping from variable
|
| 22 |
+
assignments to the index of the row vector in the value field:
|
| 23 |
+
|
| 24 |
+
+-----+-----+-----+-------------------+
|
| 25 |
+
| x1 | x2 | x3 | phi(x1, x2, x3)|
|
| 26 |
+
+-----+-----+-----+-------------------+
|
| 27 |
+
| x1_0| x2_0| x3_0| phi.value(0) |
|
| 28 |
+
+-----+-----+-----+-------------------+
|
| 29 |
+
| x1_0| x2_0| x3_1| phi.value(1) |
|
| 30 |
+
+-----+-----+-----+-------------------+
|
| 31 |
+
| x1_0| x2_1| x3_0| phi.value(2) |
|
| 32 |
+
+-----+-----+-----+-------------------+
|
| 33 |
+
| x1_0| x2_1| x3_1| phi.value(3) |
|
| 34 |
+
+-----+-----+-----+-------------------+
|
| 35 |
+
| x1_1| x2_0| x3_0| phi.value(4) |
|
| 36 |
+
+-----+-----+-----+-------------------+
|
| 37 |
+
| x1_1| x2_0| x3_1| phi.value(5) |
|
| 38 |
+
+-----+-----+-----+-------------------+
|
| 39 |
+
| x1_1| x2_1| x3_0| phi.value(6) |
|
| 40 |
+
+-----+-----+-----+-------------------+
|
| 41 |
+
| x1_1| x2_1| x3_1| phi.value(7) |
|
| 42 |
+
+-----+-----+-----+-------------------+
|
| 43 |
+
|
| 44 |
+
Parameters
|
| 45 |
+
----------
|
| 46 |
+
variables: list, array-like
|
| 47 |
+
List of variables on which the factor is to be defined i.e. scope of the factor.
|
| 48 |
+
|
| 49 |
+
cardinality: list, array_like
|
| 50 |
+
List of cardinalities/no.of states of each variable. `cardinality`
|
| 51 |
+
array must have a value corresponding to each variable in
|
| 52 |
+
`variables`.
|
| 53 |
+
|
| 54 |
+
values: list, array_like
|
| 55 |
+
List of values of factor.
|
| 56 |
+
A DiscreteFactor's values are stored in a row vector in the value
|
| 57 |
+
using an ordering such that the left-most variables as defined in
|
| 58 |
+
`variables` cycle through their values the fastest. Please refer
|
| 59 |
+
to examples for usage examples.
|
| 60 |
+
|
| 61 |
+
Examples
|
| 62 |
+
--------
|
| 63 |
+
>>> import numpy as np
|
| 64 |
+
>>> from pgmpy.factors.discrete import DiscreteFactor
|
| 65 |
+
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8))
|
| 66 |
+
>>> phi
|
| 67 |
+
<DiscreteFactor representing phi(x1:2, x2:2, x3:2) at 0x7f8188fcaa90>
|
| 68 |
+
>>> print(phi)
|
| 69 |
+
+------+------+------+-----------------+
|
| 70 |
+
| x1 | x2 | x3 | phi(x1,x2,x3) |
|
| 71 |
+
|------+------+------+-----------------|
|
| 72 |
+
| x1_0 | x2_0 | x3_0 | 1.0000 |
|
| 73 |
+
| x1_0 | x2_0 | x3_1 | 1.0000 |
|
| 74 |
+
| x1_0 | x2_1 | x3_0 | 1.0000 |
|
| 75 |
+
| x1_0 | x2_1 | x3_1 | 1.0000 |
|
| 76 |
+
| x1_1 | x2_0 | x3_0 | 1.0000 |
|
| 77 |
+
| x1_1 | x2_0 | x3_1 | 1.0000 |
|
| 78 |
+
| x1_1 | x2_1 | x3_0 | 1.0000 |
|
| 79 |
+
| x1_1 | x2_1 | x3_1 | 1.0000 |
|
| 80 |
+
+------+------+------+-----------------+
|
| 81 |
+
"""
|
| 82 |
+
|
| 83 |
+
def __init__(self, variables, cardinality, values, state_names={}):
    """
    Initialize a DiscreteFactor over `variables` with the given
    per-variable `cardinality` and flat `values` array.

    Raises TypeError/ValueError when the arguments are malformed (string
    instead of a list of variables, length mismatches, duplicate variable
    names, non-dict state_names, or a values array of the wrong size).
    """
    # NOTE: the mutable default `state_names={}` is never mutated here;
    # it is only forwarded to `store_state_names`.
    if isinstance(variables, str):
        raise TypeError("Variables: Expected type list or array like, got string")

    # Convert values to the configured compute backend: a NumPy array, or
    # a torch tensor on the configured device/dtype.
    if config.BACKEND == "numpy":
        values = np.array(values, dtype=config.get_dtype())
    else:
        values = (
            torch.Tensor(values).type(config.get_dtype()).to(config.get_device())
        )

    if len(cardinality) != len(variables):
        raise ValueError(
            "Number of elements in cardinality must be equal to number of variables"
        )

    # The flat values array must hold exactly one entry per joint state.
    if compat_fns.size(values) != np.prod(cardinality):
        raise ValueError(f"Values array must be of size: {np.prod(cardinality)}")

    if len(set(variables)) != len(variables):
        raise ValueError("Variable names cannot be same")

    if not isinstance(state_names, dict):
        raise ValueError(
            f"state_names must be of type dict. Got {type(state_names)}."
        )

    self.variables = list(variables)
    self.cardinality = np.array(cardinality, dtype=int)
    # Reshape the flat values into one axis per variable.
    self.values = values.reshape(tuple(self.cardinality))

    # Set the state names
    super(DiscreteFactor, self).store_state_names(
        variables, cardinality, state_names
    )
|
| 118 |
+
|
| 119 |
+
def scope(self):
    """
    Returns the scope of the factor, i.e. the list of variables the
    factor is defined over.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12))
    >>> phi.scope()
    ['x1', 'x2', 'x3']
    """
    # The variable list is the scope by definition.
    return self.variables
|
| 136 |
+
|
| 137 |
+
def get_cardinality(self, variables):
    """
    Returns the cardinality (number of states) of each variable in
    `variables` as a dict {variable: cardinality}.

    Parameters
    ----------
    variables: list, array-like
        A list of variable names; each must be in the factor's scope.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi.get_cardinality(['x1'])
    {'x1': 2}
    >>> phi.get_cardinality(['x1', 'x2'])
    {'x1': 2, 'x2': 3}
    """
    if isinstance(variables, str):
        raise TypeError("variables: Expected type list or array-like, got type str")

    # Every requested variable must be part of the factor's scope.
    if any(var not in self.variables for var in variables):
        raise ValueError("Variable not in scope")

    # Build a name -> cardinality lookup once, then select.
    lookup = dict(zip(self.variables, self.cardinality))
    return {var: lookup[var] for var in variables}
|
| 167 |
+
|
| 168 |
+
def get_value(self, **kwargs):
    """
    Returns the factor value for the given assignment of states. Each
    keyword argument is treated as a state name first; if the name is not
    found, the value is used directly as a state number.

    Parameters
    ----------
    kwargs: named arguments of the form variable=state_name
        One entry per variable in the factor's scope.

    Returns
    -------
    value of kwargs: float
        The value of specified states.

    Examples
    --------
    >>> from pgmpy.utils import get_example_model
    >>> model = get_example_model("asia")
    >>> phi = model.get_cpds("either").to_factor()
    >>> phi.get_value(lung="yes", tub="no", either="yes")
    1.0
    """
    # Reject variables outside the factor's scope up front.
    for name in kwargs:
        if name not in self.variables:
            raise ValueError(f"Factor doesn't have the variable: {name}")

    position = []
    for var in self.variables:
        if var not in kwargs:
            raise ValueError(f"Variable: {var} not found in arguments")
        # EAFP: assume a state name; fall back to a raw state number.
        try:
            position.append(self.name_to_no[var][kwargs[var]])
        except KeyError:
            logger.info(f"Using {var} state as number instead of name.")
            position.append(kwargs[var])
    return self.values[tuple(position)]
|
| 208 |
+
|
| 209 |
+
def set_value(self, value, **kwargs):
    """
    Sets the factor value for the given assignment of states. String
    keyword values are resolved through the state-name mapping; any other
    value is used directly as a state number.

    Parameters
    ----------
    value: float
        The value for the specified state.

    kwargs: named arguments of the form variable=state_name
        One entry per variable in the factor's scope.

    Returns
    -------
    None

    Examples
    --------
    >>> from pgmpy.utils import get_example_model
    >>> model = get_example_model("asia")
    >>> phi = model.get_cpds("either").to_factor()
    >>> phi.set_value(value=0.1, lung="yes", tub="no", either="yes")
    >>> phi.get_value(lung='yes', tub='no', either='yes')
    0.1
    """
    # Only plain numbers are accepted as the new value.
    if not isinstance(value, (float, int)):
        raise ValueError(f"value must be float. Got: {type(value)}.")

    for name in kwargs:
        if name not in self.variables:
            raise ValueError(f"Factor doesn't have the variable: {name}")

    coords = []
    for var in self.variables:
        if var not in kwargs:
            raise ValueError(f"Variable: {var} not found in arguments")
        state = kwargs[var]
        if isinstance(state, str):
            # A string is a state name: translate it to its number.
            coords.append(self.name_to_no[var][state])
        else:
            logger.info(f"Using {var} state as number instead of name.")
            coords.append(state)

    self.values[tuple(coords)] = value
|
| 253 |
+
|
| 254 |
+
def assignment(self, index):
    """
    Returns a list of assignments (variable and state) for the corresponding index.

    Parameters
    ----------
    index: list, array-like
        List of flat indices whose assignment is to be computed.

    Returns
    -------
    Full assignments: list
        Returns a list of full assignments of all the variables of the factor.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi = DiscreteFactor(['diff', 'intel'], [2, 2], np.ones(4))
    >>> phi.assignment([1, 2])
    [[('diff', 0), ('intel', 1)], [('diff', 1), ('intel', 0)]]
    """
    # Normalize `index` into a backend array/tensor for vectorized math.
    if config.get_backend() == "numpy":
        index = np.array(index)
    else:
        if (len(index) == 1) and (isinstance(index[0], torch.Tensor)):
            # A single tensor element: add a leading batch dimension.
            index = index[0][None]
        else:
            index = torch.tensor(index, dtype=torch.int, device=config.get_device())

    # Flat indices run over the full joint state space.
    max_possible_index = np.prod(self.cardinality) - 1
    if not all(i <= max_possible_index for i in index):
        raise IndexError("Index greater than max possible index")

    assignments = compat_fns.get_compute_backend().zeros(
        (len(index), len(self.scope())), dtype=int
    )
    # Decode the flat index digit by digit, starting from the
    # fastest-varying (last) variable, hence the reversed cardinality.
    rev_card = self.cardinality[::-1]
    for i, card in enumerate(rev_card):
        assignments[:, i] = index % card
        index = index // card

    # Columns were filled last-variable-first; flip back to scope order.
    assignments = compat_fns.flip(assignments, axis=(1,))

    return [
        [
            (key, self.get_state_names(key, int(val)))
            for key, val in zip(self.variables, values)
        ]
        for values in assignments
    ]
|
| 305 |
+
|
| 306 |
+
def identity_factor(self):
    """
    Returns the identity factor: a factor with the same scope and
    cardinality as this one but with every value set to 1, so that
    multiplying it with the original factor returns the factor itself.

    Returns
    -------
    Identity factor: pgmpy.factors.discrete.DiscreteFactor.
        Returns a factor with all values set to 1.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi.identity_factor().values
    array([[[ 1.,  1.],
            [ 1.,  1.],
            [ 1.,  1.]],
           [[ 1.,  1.],
            [ 1.,  1.],
            [ 1.,  1.]]])
    """
    # Same scope/cardinality, all-ones values of matching total size.
    n_entries = compat_fns.size(self.values)
    return DiscreteFactor(
        self.variables,
        self.cardinality,
        compat_fns.ones(n_entries),
        state_names=self.state_names,
    )
|
| 340 |
+
|
| 341 |
+
def marginalize(self, variables, inplace=True):
    """
    Marginalize (sum out) the given variables from the factor.

    Parameters
    ----------
    variables: list, array-like
        List of variables over which to marginalize.

    inplace: boolean
        If inplace=True it will modify the factor itself, else would return
        a new factor.

    Returns
    -------
    pgmpy.factors.discrete.DiscreteFactor or None
        If inplace=True (default) returns None else returns a new
        `DiscreteFactor` instance.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi.marginalize(['x1', 'x3'])
    >>> phi.values
    array([14., 22., 30.])
    >>> phi.variables
    ['x2']
    """
    # A bare string would silently iterate per-character; reject it early.
    if isinstance(variables, str):
        raise TypeError("variables: Expected type list or array-like, got type str")

    target = self if inplace else self.copy()

    for var in variables:
        if var not in target.variables:
            raise ValueError(f"{var} not in scope.")

    n_axes = len(self.variables)
    drop_axes = {target.variables.index(var) for var in variables}
    keep_axes = sorted(set(range(n_axes)) - drop_axes)

    # Shrink the metadata to the surviving axes before summing out values.
    target.variables = [target.variables[axis] for axis in keep_axes]
    target.cardinality = target.cardinality[keep_axes]
    target.del_state_names(variables)

    # einsum with only the kept axes in the output sums over the dropped ones.
    target.values = compat_fns.einsum(target.values, range(n_axes), keep_axes)

    if not inplace:
        return target
|
| 391 |
+
|
| 392 |
+
def maximize(self, variables, inplace=True):
    """
    Max-marginalize the factor with respect to `variables`.

    Parameters
    ----------
    variables: list, array-like
        List of variables with respect to which factor is to be maximized.

    inplace: boolean
        If inplace=True it will modify the factor itself, else would return
        a new factor.

    Returns
    -------
    pgmpy.factors.discrete.DiscreteFactor or None
        If inplace=True (default) returns None else returns a new
        `DiscreteFactor` instance.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2], [0.25, 0.35, 0.08, 0.16, 0.05, 0.07,
    ...                                                      0.00, 0.00, 0.15, 0.21, 0.09, 0.18])
    >>> phi.maximize(['x2'])
    >>> phi.variables
    ['x1', 'x3']
    >>> phi.cardinality
    array([3, 2])
    >>> phi.values
    array([[ 0.25,  0.35],
           [ 0.05,  0.07],
           [ 0.15,  0.21]])
    """
    # A bare string would silently iterate per-character; reject it early.
    if isinstance(variables, str):
        raise TypeError("variables: Expected type list or array-like, got type str")

    target = self if inplace else self.copy()

    for var in variables:
        if var not in target.variables:
            raise ValueError(f"{var} not in scope.")

    drop_axes = [target.variables.index(var) for var in variables]
    keep_axes = sorted(set(range(len(self.variables))) - set(drop_axes))

    # Shrink the metadata to the surviving axes, then take the max over the
    # dropped axes in one backend call.
    target.variables = [target.variables[axis] for axis in keep_axes]
    target.cardinality = target.cardinality[keep_axes]
    target.del_state_names(variables)
    target.values = compat_fns.max(target.values, axis=tuple(drop_axes))

    if not inplace:
        return target
|
| 447 |
+
|
| 448 |
+
def normalize(self, inplace=True):
    """
    Normalize the factor's values so that they sum to 1.

    Parameters
    ----------
    inplace: boolean
        If inplace=True it will modify the factor itself, else would return
        a new factor.

    Returns
    -------
    pgmpy.factors.discrete.DiscreteFactor or None
        If inplace=True (default) returns None else returns a new
        `DiscreteFactor` instance.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi.normalize()
    >>> phi.values
    array([[[ 0.        ,  0.01515152],
            [ 0.03030303,  0.04545455],
            [ 0.06060606,  0.07575758]],
           [[ 0.09090909,  0.10606061],
            [ 0.12121212,  0.13636364],
            [ 0.15151515,  0.16666667]]])
    """
    target = self if inplace else self.copy()

    # Divide every entry by the grand total over all assignments.
    total = target.values.sum()
    target.values = target.values / total

    if not inplace:
        return target
|
| 493 |
+
|
| 494 |
+
def reduce(self, values, inplace=True, show_warnings=True):
    """
    Reduces the factor to the context of given variable values. The variables
    which are reduced would be removed from the factor.

    Parameters
    ----------
    values: list, array-like
        A list of tuples of the form (variable_name, variable_state).

    inplace: boolean
        If inplace=True it will modify the factor itself, else would return
        a new factor.

    show_warnings: boolean
        Whether to show warning when state name not found.

    Returns
    -------
    Reduced factor: pgmpy.factors.discrete.DiscreteFactor or None
        If inplace=True (default) returns None else returns a new `DiscreteFactor` instance.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi.reduce([('x1', 0), ('x2', 0)])
    >>> phi.variables
    ['x3']
    >>> phi.cardinality
    array([2])
    >>> phi.values
    array([0., 1.])
    """
    # Check if values is an array; a bare string would iterate per-character.
    if isinstance(values, str):
        raise TypeError("values: Expected type list or array-like, got type str")

    if not all(isinstance(state_tuple, tuple) for state_tuple in values):
        # Bug fix: the message was a broken "{type}" placeholder passed with a
        # stray second TypeError argument, so the offending type never showed
        # up; interpolate it properly.
        raise TypeError(
            f"values: Expected type list of tuples, got type {type(values[0])}"
        )

    # Check if all variables in values are in the factor
    for var, _ in values:
        if var not in self.variables:
            raise ValueError(f"The variable: {var} is not in the factor")

    phi = self if inplace else self.copy()

    # Convert the state names to state numbers. If state name not found treat
    # them as state numbers (the comprehension either completes or raises
    # before `values` is rebound, so the original tuples survive the fallback).
    try:
        values = [
            (var, self.get_state_no(var, state_name)) for var, state_name in values
        ]
    except KeyError:
        if show_warnings:
            logger.warning(
                "Found unknown state name. Trying to switch to using all state names as state numbers"
            )

    # Build an indexing tuple that pins each reduced variable to its state.
    var_index_to_del = []
    slice_ = [slice(None)] * len(self.variables)
    for var, state in values:
        var_index = phi.variables.index(var)
        slice_[var_index] = state
        var_index_to_del.append(var_index)

    var_index_to_keep = sorted(
        set(range(len(phi.variables))) - set(var_index_to_del)
    )
    # set difference is not guaranteed to maintain ordering
    phi.variables = [phi.variables[index] for index in var_index_to_keep]
    phi.cardinality = phi.cardinality[var_index_to_keep]
    phi.del_state_names([var for var, _ in values])

    # Integer indices in slice_ drop their axes, leaving only the kept ones.
    phi.values = phi.values[tuple(slice_)]

    if not inplace:
        return phi
|
| 575 |
+
|
| 576 |
+
def sum(self, phi1, inplace=True):
    """
    DiscreteFactor sum with `phi1`.

    Parameters
    ----------
    phi1: float or `DiscreteFactor` instance.
        If float, the value is added to each value in the factor.
        DiscreteFactor to be added.

    inplace: boolean
        If inplace=True it will modify the factor itself, else would return
        a new factor.

    Returns
    -------
    Summed factor: pgmpy.factors.discrete.DiscreteFactor or None
        If inplace=True (default) returns None else returns a new `DiscreteFactor` instance.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
    >>> phi1.sum(phi2, inplace=True)
    >>> phi1.variables
    ['x1', 'x2', 'x3', 'x4']
    >>> phi1.cardinality
    array([2, 3, 2, 2])
    >>> phi1.values
    array([[[[ 0.,  2.],
             [ 5.,  7.]],
            [[ 2.,  4.],
             [ 7.,  9.]],
            [[ 4.,  6.],
             [ 9., 11.]]],
           [[[ 7.,  9.],
             [12., 14.]],
            [[ 9., 11.],
             [14., 16.]],
            [[11., 13.],
             [16., 18.]]]])
    """
    phi = self if inplace else self.copy()
    if isinstance(phi1, (int, float)):
        # Scalar case: broadcast-add the constant over every assignment.
        phi.values += phi1
    else:
        # Work on a copy: phi1 gets its axes reshaped/reordered below and the
        # caller's factor must not be mutated.
        phi1 = phi1.copy()

        # modifying phi to add new variables
        extra_vars = set(phi1.variables) - set(phi.variables)
        if extra_vars:
            # Append singleton axes so phi broadcasts over phi1's extra vars.
            slice_ = [slice(None)] * len(phi.variables)
            slice_.extend([np.newaxis] * len(extra_vars))
            phi.values = phi.values[tuple(slice_)]

            phi.variables.extend(extra_vars)

            new_var_card = phi1.get_cardinality(extra_vars)
            phi.cardinality = np.append(
                phi.cardinality, [new_var_card[var] for var in extra_vars]
            )
            phi.add_state_names(phi1)

        # modifying phi1 to add new variables
        extra_vars = set(phi.variables) - set(phi1.variables)
        if extra_vars:
            slice_ = [slice(None)] * len(phi1.variables)
            slice_.extend([np.newaxis] * len(extra_vars))
            phi1.values = phi1.values[tuple(slice_)]

            phi1.variables.extend(extra_vars)
            # No need to modify cardinality as we don't need it.

        # rearranging the axes of phi1 to match phi
        # (selection-sort style: variable list and value axes are swapped in
        # lockstep so axis i of phi1 always matches variable i of phi)
        for axis in range(phi.values.ndim):
            exchange_index = phi1.variables.index(phi.variables[axis])
            phi1.variables[axis], phi1.variables[exchange_index] = (
                phi1.variables[exchange_index],
                phi1.variables[axis],
            )
            phi1.values = phi1.values.swapaxes(axis, exchange_index)

        phi.values = phi.values + phi1.values

    if not inplace:
        return phi
|
| 663 |
+
|
| 664 |
+
def product(self, phi1, inplace=True):
    """
    Multiply this factor with `phi1`.

    Parameters
    ----------
    phi1: float or `DiscreteFactor` instance
        If float, all the values are multiplied with `phi1`. If a
        `DiscreteFactor` instance, the factors are multiplied by matching
        assignments over their shared variables.

    inplace: boolean
        If inplace=True it will modify the factor itself, else would return
        a new factor.

    Returns
    -------
    pgmpy.factors.discrete.DiscreteFactor or None
        If inplace=True (default) returns None else returns a new
        `DiscreteFactor` instance whose scope is the union of both scopes.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
    >>> phi1.product(phi2, inplace=True)
    >>> phi1.variables
    ['x1', 'x2', 'x3', 'x4']
    >>> phi1.cardinality
    array([2, 3, 2, 2])
    """
    phi = self if inplace else self.copy()
    if isinstance(phi1, (int, float)):
        # Scalar case: scale every assignment's value.
        phi.values *= phi1
        if not inplace:
            return phi
        return None

    # The result's scope is the union of both scopes; assign each variable an
    # einsum axis label so shared variables share an axis.
    new_variables = list(set(phi.variables).union(phi1.variables))
    var_to_int = {var: index for index, var in enumerate(new_variables)}
    phi.values = compat_fns.einsum(
        phi.values,
        [var_to_int[var] for var in phi.variables],
        phi1.values,
        [var_to_int[var] for var in phi1.variables],
        range(len(new_variables)),
    )

    # Rebuild the cardinality array in the new variable order; phi1's entries
    # win for shared variables (they must agree for a valid product anyway).
    card_map = dict(zip(phi.variables, phi.cardinality))
    card_map.update(dict(zip(phi1.variables, phi1.cardinality)))
    phi.cardinality = np.array([card_map[var] for var in new_variables])

    # Adopt the new scope and merge in phi1's state names.
    phi.variables = new_variables
    phi.add_state_names(phi1)

    if not inplace:
        return phi
|
| 736 |
+
|
| 737 |
+
def divide(self, phi1, inplace=True):
    """
    DiscreteFactor division by `phi1`.

    Parameters
    ----------
    phi1 : `DiscreteFactor` instance
        The denominator for division. Its scope must be a subset of this
        factor's scope.

    inplace: boolean
        If inplace=True it will modify the factor itself, else would return
        a new factor.

    Returns
    -------
    Divided factor: pgmpy.factors.discrete.DiscreteFactor or None
        If inplace=True (default) returns None else returns a new `DiscreteFactor` instance.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi2 = DiscreteFactor(['x3', 'x1'], [2, 2], range(1, 5))
    >>> phi1.divide(phi2)
    >>> phi1.variables
    ['x1', 'x2', 'x3']
    >>> phi1.cardinality
    array([2, 3, 2])
    >>> phi1.values
    array([[[ 0.        ,  0.33333333],
            [ 2.        ,  1.        ],
            [ 4.        ,  1.66666667]],
           [[ 3.        ,  1.75      ],
            [ 4.        ,  2.25      ],
            [ 5.        ,  2.75      ]]])
    """
    phi = self if inplace else self.copy()
    # Copy the divisor: its axes are reshaped/reordered below and the
    # caller's factor must not be mutated.
    phi1 = phi1.copy()

    if set(phi1.variables) - set(phi.variables):
        raise ValueError("Scope of divisor should be a subset of dividend")

    # Adding extra variables in phi1.
    extra_vars = set(phi.variables) - set(phi1.variables)
    if extra_vars:
        # Append singleton axes so phi1 broadcasts over phi's extra vars.
        slice_ = [slice(None)] * len(phi1.variables)
        slice_.extend([np.newaxis] * len(extra_vars))
        phi1.values = phi1.values[tuple(slice_)]

        phi1.variables.extend(extra_vars)

    # Rearranging the axes of phi1 to match phi (variable list and value axes
    # are swapped in lockstep so axis i of phi1 matches variable i of phi).
    for axis in range(phi.values.ndim):
        exchange_index = phi1.variables.index(phi.variables[axis])
        phi1.variables[axis], phi1.variables[exchange_index] = (
            phi1.variables[exchange_index],
            phi1.variables[axis],
        )
        phi1.values = phi1.values.swapaxes(axis, exchange_index)

    phi.values = phi.values / phi1.values

    # If factor division 0/0 = 0 but is undefined for x/0. In pgmpy we are using
    # np.inf to represent x/0 cases.
    phi.values[config.get_compute_backend().isnan(phi.values)] = 0

    if not inplace:
        return phi
|
| 805 |
+
|
| 806 |
+
def sample(self, n, seed=None):
    """
    Normalize the factor and draw state combinations from it.

    Parameters
    ----------
    n: int
        Number of samples to generate.

    seed: int (default: None)
        The seed value for the random number generator.

    Returns
    -------
    pandas.DataFrame
        One row per sample, one column per variable, holding state names.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi1.sample(5)
       x1  x2  x3
    0   1   0   0
    1   0   2   0
    2   1   2   0
    3   1   1   1
    4   1   1   1
    """
    # Flatten the normalized factor into a categorical distribution over all
    # assignment indices.
    normalized = self.normalize(inplace=False)
    probs = normalized.values.ravel()

    # TODO: Fix this to make it work natively in torch.
    probs = compat_fns.to_numpy(probs)

    rng = np.random.default_rng(seed=seed)
    drawn = rng.choice(range(len(probs)), size=n, p=probs)

    # Memoize index -> assignment since duplicate draws are common.
    assignment_cache = {}
    rows = []
    for idx in drawn:
        if idx not in assignment_cache:
            assignment_cache[idx] = self.assignment([idx])[0]
        rows.append(assignment_cache[idx])

    return pd.DataFrame([dict(row) for row in rows])
|
| 849 |
+
|
| 850 |
+
def copy(self):
    """
    Return an independent copy of the factor.

    The copy shares no mutable state with the original: variables,
    cardinality, values, and all state-name maps are duplicated.

    Returns
    -------
    pgmpy.factors.discrete.DiscreteFactor
        A copy of the original discrete factor.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 3], np.arange(18))
    >>> phi_copy = phi.copy()
    >>> phi_copy.variables
    ['x1', 'x2', 'x3']
    >>> phi_copy.cardinality
    array([2, 3, 3])
    """
    # Bypass __init__ (and its validation) — the attributes are copied
    # directly from an already-valid instance. __class__ keeps subclass type.
    clone = DiscreteFactor.__new__(self.__class__)
    clone.variables = list(self.variables)
    clone.cardinality = np.array(self.cardinality)
    clone.values = compat_fns.copy(self.values)
    clone.state_names = self.state_names.copy()
    clone.no_to_name = self.no_to_name.copy()
    clone.name_to_no = self.name_to_no.copy()
    return clone
|
| 885 |
+
|
| 886 |
+
def is_valid_cpd(self):
    """
    Checks if the factor's values can be used for a valid CPD.

    A valid CPD sums to 1 over the first scope variable for every
    configuration of the remaining (evidence) variables.
    """
    # Marginalize out the first scope variable; for a valid CPD each of the
    # remaining prod(cardinality[1:]) entries must then be ~1.
    # cardinality[:0:-1] is cardinality[1:] reversed — same product.
    return config.get_compute_backend().allclose(
        self.to_factor()
        .marginalize(self.scope()[:1], inplace=False)
        .values.flatten(),
        compat_fns.ones(np.prod(self.cardinality[:0:-1])),
        atol=0.01,
    )
|
| 897 |
+
|
| 898 |
+
def __str__(self):
    """Return a grid-formatted table of all assignments and their phi values."""
    return self._str(phi_or_p="phi", tablefmt="grid")
|
| 900 |
+
|
| 901 |
+
def _str(self, phi_or_p="phi", tablefmt="grid", print_state_names=True):
    """
    Generate the string from `__str__` method.

    Parameters
    ----------
    phi_or_p: 'phi' | 'p'
        'phi': When used for Factors.
        'p': When used for CPDs.
    tablefmt: str
        Table format name passed through to `tabulate`.
    print_state_names: boolean
        If True, the user defined state names are displayed.
    """
    # Header: one column per scope variable plus the value column, e.g.
    # "phi(x1,x2)".
    string_header = list(map(str, self.scope()))
    string_header.append(f"{phi_or_p}({','.join(string_header)})")

    # Enumerate assignments in row-major order, which matches the order of
    # self.values.ravel(), so value_index walks the flat values in lockstep.
    value_index = 0
    factor_table = []
    for prob in product(*[range(card) for card in self.cardinality]):
        if self.state_names and print_state_names:
            # Label each cell as "var(state_name)".
            prob_list = [
                "{var}({state})".format(
                    var=list(self.variables)[i],
                    state=self.state_names[list(self.variables)[i]][prob[i]],
                )
                for i in range(len(self.variables))
            ]
        else:
            # Fall back to "var_stateindex" labels.
            prob_list = [
                f"{list(self.variables)[i]}_{prob[i]}"
                for i in range(len(self.variables))
            ]

        prob_list.append(self.values.ravel()[value_index])
        factor_table.append(prob_list)
        value_index += 1

    return tabulate(
        factor_table, headers=string_header, tablefmt=tablefmt, floatfmt=".4f"
    )
|
| 940 |
+
|
| 941 |
+
def __repr__(self):
    """Return a compact debug representation listing scope and cardinalities."""
    scope_desc = ", ".join(
        f"{var}:{card}" for var, card in zip(self.variables, self.cardinality)
    )
    return f"<DiscreteFactor representing phi({scope_desc}) at {hex(id(self))}>"
|
| 946 |
+
|
| 947 |
+
def __mul__(self, other):
    """Factor product via `*`; returns a new factor (see `product`)."""
    return self.product(other, inplace=False)

def __rmul__(self, other):
    """Right-hand `*`; product is commutative so delegate to `__mul__`."""
    return self.__mul__(other)

def __add__(self, other):
    """Factor sum via `+`; returns a new factor (see `sum`)."""
    return self.sum(other, inplace=False)

def __radd__(self, other):
    """Right-hand `+`; sum is commutative so delegate to `__add__`."""
    return self.__add__(other)

def __truediv__(self, other):
    """Factor division via `/`; returns a new factor (see `divide`)."""
    return self.divide(other, inplace=False)

# Python 2 style alias kept for backward compatibility.
__div__ = __truediv__
|
| 963 |
+
|
| 964 |
+
def __eq__(self, other, atol=1e-08):
    """
    Method for checking if two factors are equal.

    Equality is order-insensitive: `other` is brought into this factor's
    variable order and state-name order before values are compared.

    Parameters
    ----------
    atol: float
        The maximum allowed difference in values to be considered equal.
    """
    if not (isinstance(self, DiscreteFactor) and isinstance(other, DiscreteFactor)):
        return False

    elif set(self.scope()) != set(other.scope()):
        return False

    else:
        # Change the axis so that the variables are in the same order.
        # Work on a copy: `other` must not be mutated by the comparison.
        phi = other.copy()
        if self.variables != phi.variables:
            for axis in range(self.values.ndim):
                exchange_index = phi.variables.index(self.variables[axis])
                phi.variables[axis], phi.variables[exchange_index] = (
                    phi.variables[exchange_index],
                    phi.variables[axis],
                )
                phi.cardinality[axis], phi.cardinality[exchange_index] = (
                    phi.cardinality[exchange_index],
                    phi.cardinality[axis],
                )
                phi.values = phi.values.swapaxes(axis, exchange_index)

        # Check the state names order and match them
        for axis, var in enumerate(self.variables):
            if set(self.state_names[var]) != set(phi.state_names[var]):
                return False
            elif self.state_names[var] != phi.state_names[var]:
                # Same states, different order: permute phi's axis so its
                # states line up with self's ordering before comparing values.
                ref_index = []
                for state_name in self.state_names[var]:
                    ref_index.append(phi.state_names[var].index(state_name))

                slice_ = [slice(None)] * len(self.variables)
                slice_[axis] = ref_index
                phi.values = phi.values[tuple(slice_)]

        if phi.values.shape != self.values.shape:
            return False
        elif not config.get_compute_backend().allclose(
            phi.values, self.values, atol=atol
        ):
            return False
        elif not all(self.cardinality == phi.cardinality):
            return False
        else:
            return True
|
| 1018 |
+
|
| 1019 |
+
def __ne__(self, other):
    """Inequality: the logical negation of `__eq__`."""
    is_equal = self.__eq__(other)
    return not is_equal
|
| 1021 |
+
|
| 1022 |
+
def __hash__(self):
    """
    Order-insensitive hash: axes are canonically reordered by variable hash
    so that factors equal under `__eq__`'s axis reordering hash alike.
    """
    variable_hashes = [hash(variable) for variable in self.variables]
    sorted_var_hashes = sorted(variable_hashes)
    state_names_hash = hash(frozenset(self.state_names))
    # Copy before reordering: hashing must not mutate self.
    phi = self.copy()
    for axis in range(phi.values.ndim):
        # Selection-sort the axes into sorted-variable-hash order, swapping
        # cardinality and value axes in lockstep.
        exchange_index = variable_hashes.index(sorted_var_hashes[axis])
        variable_hashes[axis], variable_hashes[exchange_index] = (
            variable_hashes[exchange_index],
            variable_hashes[axis],
        )
        phi.cardinality[axis], phi.cardinality[exchange_index] = (
            phi.cardinality[exchange_index],
            phi.cardinality[axis],
        )
        phi.values = phi.values.swapaxes(axis, exchange_index)
    # Combine the canonicalized pieces into a single string and hash that.
    return hash(
        str(sorted_var_hashes)
        + str(hash(compat_fns.tobytes(phi.values)))
        + str(hash(compat_fns.tobytes(phi.cardinality)))
        + str(state_names_hash)
    )
|
testbed/pgmpy__pgmpy/pgmpy/factors/discrete/JointProbabilityDistribution.py
ADDED
|
@@ -0,0 +1,404 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
from functools import reduce
|
| 3 |
+
from operator import mul
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from pgmpy.factors.discrete import DiscreteFactor
|
| 8 |
+
from pgmpy.independencies import Independencies
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class JointProbabilityDistribution(DiscreteFactor):
|
| 12 |
+
"""
|
| 13 |
+
Base class for Joint Probability Distribution
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
def __init__(self, variables, cardinality, values):
    """
    Initialize a Joint Probability Distribution.

    The values vector is laid out in row-major assignment order: the
    right-most variable in `variables` cycles through its states fastest,
    the left-most slowest. For example with binary x1, x2, x3 the entries
    correspond to (x1_0,x2_0,x3_0), (x1_0,x2_0,x3_1), (x1_0,x2_1,x3_0), ...

    Parameters
    ----------
    variables: list
        List of scope of Joint Probability Distribution.
    cardinality: list, array_like
        List of cardinality of each variable.
    values: list, array_like
        List or array of probabilities of the assignments; must sum to 1.

    Raises
    ------
    ValueError
        If the supplied values do not sum (approximately) to 1.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution
    >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8)/8)
    >>> print(prob)
    x1    x2    x3      P(x1,x2,x3)
    ----  ----  ----  -------------
    x1_0  x2_0  x3_0         0.1250
    x1_0  x2_0  x3_1         0.1250
    x1_0  x2_1  x3_0         0.1250
    x1_0  x2_1  x3_1         0.1250
    x1_1  x2_0  x3_0         0.1250
    x1_1  x2_0  x3_1         0.1250
    x1_1  x2_1  x3_0         0.1250
    x1_1  x2_1  x3_1         0.1250
    """
    # Guard clause: a joint distribution must be normalized.
    if not np.isclose(np.sum(values), 1):
        raise ValueError("The probability values doesn't sum to 1.")
    super(JointProbabilityDistribution, self).__init__(
        variables, cardinality, values
    )
|
| 79 |
+
|
| 80 |
+
def __repr__(self):
    """Return a terse, unambiguous summary showing the scope and cardinalities."""
    pairs = (f"{name}:{card}" for name, card in zip(self.variables, self.cardinality))
    scope = ", ".join(pairs)
    return f"<Joint Distribution representing P({scope}) at {hex(id(self))}>"
|
| 85 |
+
|
| 86 |
+
def __str__(self):
    """Return the tabular string form of the distribution."""
    # Render via the shared factor printer, labelling the value column "P"
    # (probability) instead of the generic "phi" used by plain factors.
    return self._str(phi_or_p="P")
|
| 88 |
+
|
| 89 |
+
def marginal_distribution(self, variables, inplace=True):
    """
    Returns the marginal distribution over the given variables.

    Parameters
    ----------
    variables: string, list, tuple, set, dict
        Variable or collection of variables over which the marginal
        distribution needs to be calculated.
    inplace: Boolean (default True)
        If False, return a new instance of JointProbabilityDistribution
        instead of modifying this one.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution
    >>> values = np.random.rand(12)
    >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], values/np.sum(values))
    >>> prob.marginal_distribution(['x1', 'x2'])
    >>> print(prob)
    x1    x2      P(x1,x2)
    ----  ----  ----------
    x1_0  x2_0      0.1502
    x1_0  x2_1      0.1626
    x1_0  x2_2      0.1197
    x1_1  x2_0      0.2339
    x1_1  x2_1      0.1996
    x1_1  x2_2      0.1340
    """
    # Accept a bare variable name as well as any iterable of names.
    if isinstance(variables, (list, set, dict, tuple)):
        retained = set(variables)
    else:
        retained = {variables}
    # Marginalizing keeps `retained` by summing out everything else.
    to_sum_out = list(set(list(self.variables)) - retained)
    return self.marginalize(to_sum_out, inplace=inplace)
|
| 129 |
+
|
| 130 |
+
def check_independence(
    self, event1, event2, event3=None, condition_random_variable=False
):
    """
    Check if the Joint Probability Distribution satisfies the given independence condition.

    Parameters
    ----------
    event1: list
        random variable(s) whose independence is to be checked.
    event2: list
        random variable(s) from which event1 is independent.
    event3: 2D array or list like or 1D array or list like, optional
        A 2D list of tuples of the form (variable_name, variable_state), OR
        a 1D list of variable names to condition over random variables
        (condition_random_variable must be True).
        The values/variables on which to condition the Joint Probability Distribution.
    condition_random_variable: Boolean (Default False)
        If True and event3 is not None then the independence condition is
        checked over the random variables in event3.

    For random variables say X, Y, Z to check if X is independent of Y given Z.
    event1 should be either X or Y.
    event2 should be either Y or X.
    event3 should be Z.

    Examples
    --------
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution as JPD
    >>> prob = JPD(['I','D','G'],[2,2,3],
                   [0.126,0.168,0.126,0.009,0.045,0.126,0.252,0.0224,0.0056,0.06,0.036,0.024])
    >>> prob.check_independence(['I'], ['D'])
    True
    >>> prob.check_independence(['I'], ['D'], [('G', 1)])  # Conditioning over G_1
    False
    >>> # Conditioning over random variable G
    >>> prob.check_independence(['I'], ['D'], ('G',), condition_random_variable=True)
    False
    """
    # Work on a copy so conditioning below never mutates self.
    JPD = self.copy()
    if isinstance(event1, str):
        raise TypeError("Event 1 should be a list or array-like structure")

    if isinstance(event2, str):
        raise TypeError("Event 2 should be a list or array-like structure")

    if event3:
        if isinstance(event3, str):
            raise TypeError("Event 3 cannot of type string")

        elif condition_random_variable:
            # event3 names whole random variables to condition on.
            if not all(isinstance(var, str) for var in event3):
                raise TypeError("event3 should be a 1d list of strings")
            event3 = list(event3)
            # Using the definition of conditional independence
            # If P(X,Y|Z) = P(X|Z)*P(Y|Z)
            # This can be expanded to P(X,Y,Z)*P(Z) == P(X,Z)*P(Y,Z)
            phi_z = JPD.marginal_distribution(event3, inplace=False).to_factor()
            for variable_pair in itertools.product(event1, event2):
                phi_xyz = JPD.marginal_distribution(
                    event3 + list(variable_pair), inplace=False
                ).to_factor()
                phi_xz = JPD.marginal_distribution(
                    event3 + [variable_pair[0]], inplace=False
                ).to_factor()
                phi_yz = JPD.marginal_distribution(
                    event3 + [variable_pair[1]], inplace=False
                ).to_factor()
                # Any pair violating the factorization breaks independence.
                if phi_xyz * phi_z != phi_xz * phi_yz:
                    return False
            return True
        else:
            # event3 is a list of (variable, state) assignments: condition
            # the copy in place, then run the marginal-independence test below.
            JPD.conditional_distribution(event3)

    # Marginal independence test: P(X,Y) == P(X) * P(Y) for every pair.
    for variable_pair in itertools.product(event1, event2):
        if JPD.marginal_distribution(
            variable_pair, inplace=False
        ) != JPD.marginal_distribution(
            variable_pair[0], inplace=False
        ) * JPD.marginal_distribution(
            variable_pair[1], inplace=False
        ):
            return False
    return True
|
| 212 |
+
|
| 213 |
+
def get_independencies(self, condition=None):
    """
    Returns the independent variables in the joint probability distribution.
    Returns marginally independent variables if condition=None.
    Returns conditionally independent variables if condition!=None.

    Parameters
    ----------
    condition: array_like
        Random Variable on which to condition the Joint Probability Distribution.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution
    >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)
    >>> prob.get_independencies()
    (x1 \u27C2 x2)
    (x1 \u27C2 x3)
    (x2 \u27C2 x3)
    """
    # Condition a copy so self stays untouched.
    dist = self.copy()
    if condition:
        dist.conditional_distribution(condition)
    result = Independencies()
    # A pair is independent iff its joint equals the product of its marginals.
    for first, second in itertools.combinations(list(dist.variables), 2):
        joint = dist.marginal_distribution((first, second), inplace=False)
        product = dist.marginal_distribution(
            first, inplace=False
        ) * dist.marginal_distribution(second, inplace=False)
        if joint == product:
            result.add_assertions((first, second))
    return result
|
| 248 |
+
|
| 249 |
+
def conditional_distribution(self, values, inplace=True):
    """
    Returns the Conditional Probability Distribution obtained by fixing the
    given variable states.

    Parameters
    ----------
    values: list or array_like
        A list of tuples of the form (variable_name, variable_state).
        The values on which to condition the Joint Probability Distribution.
    inplace: Boolean (default True)
        If False, returns a new instance of JointProbabilityDistribution.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution
    >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8)/8)
    >>> prob.conditional_distribution([('x1', 1)])
    >>> print(prob)
    x2    x3      P(x2,x3)
    ----  ----  ----------
    x2_0  x3_0      0.2500
    x2_0  x3_1      0.2500
    x2_1  x3_0      0.2500
    x2_1  x3_1      0.2500
    """
    target = self if inplace else self.copy()
    # Conditioning = restrict to the evidence, then renormalize so the
    # remaining probabilities again sum to one.
    target.reduce(values)
    target.normalize()
    if not inplace:
        return target
|
| 280 |
+
|
| 281 |
+
def copy(self):
    """
    Returns a copy of this JointProbabilityDistribution object.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution
    >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)
    >>> prob_copy = prob.copy()
    >>> prob_copy.values == prob.values
    True
    >>> prob_copy.variables == prob.variables
    True
    >>> prob_copy.variables[1] = 'y'
    >>> prob_copy.variables == prob.variables
    False
    """
    # Re-building through the constructor yields an independent instance.
    scope_vars = self.scope()
    return JointProbabilityDistribution(scope_vars, self.cardinality, self.values)
|
| 300 |
+
|
| 301 |
+
def minimal_imap(self, order):
    """
    Returns a Bayesian Model which is the minimal I-map of the Joint Probability
    Distribution considering the order of the variables.

    Parameters
    ----------
    order: array-like
        The order of the random variables.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution
    >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)
    >>> bayesian_model = prob.minimal_imap(order=['x2', 'x1', 'x3'])
    >>> bayesian_model
    <pgmpy.models.models.models at 0x7fd7440a9320>
    >>> bayesian_model.edges()
    [('x1', 'x3'), ('x2', 'x3')]
    """
    # Imported locally to avoid a circular import between factors and models.
    from pgmpy.models import BayesianNetwork

    def get_subsets(u):
        # Yield every subset of u (including the empty set and u itself).
        for r in range(len(u) + 1):
            for i in itertools.combinations(u, r):
                yield i

    # Minimal I-map construction: for each variable X_i (in the given order),
    # look for a parent set U among the predecessors such that
    # X_i is independent of the remaining predecessors given U, and add
    # edges U -> X_i.  (Standard ordering-based I-map construction;
    # see Koller & Friedman, ch. 3.)
    G = BayesianNetwork()
    for variable_index in range(len(order)):
        u = order[:variable_index]
        for subset in get_subsets(u):
            # NOTE(review): every qualifying subset adds edges; since earlier
            # (smaller) subsets are tried first, the accumulated parent set
            # is presumably the minimal one — confirm against callers/tests.
            if len(subset) < len(u) and self.check_independence(
                [order[variable_index]], set(u) - set(subset), subset, True
            ):
                G.add_edges_from(
                    [(variable, order[variable_index]) for variable in subset]
                )
    return G
|
| 340 |
+
|
| 341 |
+
def is_imap(self, model):
    """
    Checks whether the given BayesianNetwork is an I-map of this
    JointProbabilityDistribution.

    Parameters
    ----------
    model : An instance of BayesianNetwork Class, for which you want to
        check the I-map.

    Returns
    -------
    Is IMAP: bool
        True if the given Bayesian Network is an I-map for the Joint
        Probability Distribution, False otherwise.

    Examples
    --------
    >>> from pgmpy.models import BayesianNetwork
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution
    >>> bm = BayesianNetwork([('diff', 'grade'), ('intel', 'grade')])
    >>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
    >>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
    >>> grade_cpd = TabularCPD('grade', 3,
    ...                        [[0.1,0.1,0.1,0.1,0.1,0.1],
    ...                         [0.1,0.1,0.1,0.1,0.1,0.1],
    ...                         [0.8,0.8,0.8,0.8,0.8,0.8]],
    ...                        evidence=['diff', 'intel'],
    ...                        evidence_card=[2, 3])
    >>> bm.add_cpds(diff_cpd, intel_cpd, grade_cpd)
    >>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
    ...        0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
    >>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val)
    >>> JPD.is_imap(bm)
    True
    """
    # Imported locally to avoid a circular import between factors and models.
    from pgmpy.models import BayesianNetwork

    if not isinstance(model, BayesianNetwork):
        raise TypeError("model must be an instance of BayesianNetwork")
    # The network is an I-map iff the product of its CPDs reproduces this joint.
    factors = [cpd.to_factor() for cpd in model.get_cpds()]
    factor_prod = reduce(mul, factors)
    JPD_fact = DiscreteFactor(self.variables, self.cardinality, self.values)
    # Simplified from `if JPD_fact == factor_prod: return True else: return False`.
    return JPD_fact == factor_prod
|
| 387 |
+
|
| 388 |
+
def to_factor(self):
    """
    Returns this JointProbabilityDistribution as a DiscreteFactor object.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution
    >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)
    >>> phi = prob.to_factor()
    >>> type(phi)
    pgmpy.factors.DiscreteFactor.DiscreteFactor
    """
    # A plain factor carries the same scope, cardinalities and values,
    # but without the sums-to-one constraint of a distribution.
    factor_args = (self.variables, self.cardinality, self.values)
    return DiscreteFactor(*factor_args)
|
| 402 |
+
|
| 403 |
+
def pmap(self):
    # Placeholder: construction of a perfect map (P-map) for this
    # distribution has not been implemented yet.
    pass
|
testbed/pgmpy__pgmpy/pgmpy/factors/discrete/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .DiscreteFactor import DiscreteFactor, State
from .CPD import TabularCPD
from .JointProbabilityDistribution import JointProbabilityDistribution

# Public API of the discrete-factors subpackage.  Fix: keep this in sync with
# the imports above — JointProbabilityDistribution was imported but missing
# from __all__, which hid it from `from pgmpy.factors.discrete import *`.
__all__ = [
    "TabularCPD",
    "State",
    "DiscreteFactor",
    "JointProbabilityDistribution",
]
|
testbed/pgmpy__pgmpy/pgmpy/factors/distributions/CanonicalDistribution.py
ADDED
|
@@ -0,0 +1,614 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
from pgmpy.factors.distributions import BaseDistribution
|
| 5 |
+
from pgmpy.factors.distributions import GaussianDistribution
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class CanonicalDistribution(BaseDistribution):
|
| 9 |
+
"""
|
| 10 |
+
The intermediate factors in a Gaussian network can be described
|
| 11 |
+
compactly using a simple parametric representation called the
|
| 12 |
+
canonical form. This representation is closed under the basic
|
| 13 |
+
operations used in inference: factor product, factor division,
|
| 14 |
+
factor reduction, and marginalization. Thus, we define this
|
| 15 |
+
CanonicalDistribution class that allows the inference process to be
|
| 16 |
+
performed on joint Gaussian networks.
|
| 17 |
+
|
| 18 |
+
A canonical form C (X; K,h,g) is defined as
|
| 19 |
+
|
| 20 |
+
C (X; K,h,g) = exp( ((-1/2) * X.T * K * X) + (h.T * X) + g)
|
| 21 |
+
|
| 22 |
+
References
|
| 23 |
+
----------
|
| 24 |
+
Probabilistic Graphical Models, Principles and Techniques,
|
| 25 |
+
Daphne Koller and Nir Friedman, Section 14.2, Chapter 14.
|
| 26 |
+
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
def __init__(self, variables, K, h, g):
    """
    Parameters
    ----------
    variables: list or array-like
        The variables for which the distribution is defined.

    K: n x n, 2-d array-like
        The precision (information) matrix of the canonical form.

    h : n x 1, array-like
        The information vector of the canonical form.

    g : int, float
        The scalar (log-partition) term of the canonical form.

    The terms K, h and g are the defining parameters of the canonical
    factor representation C(X; K, h, g) = exp(-0.5 * X'KX + h'X + g).

    Examples
    --------
    >>> from pgmpy.factors.continuous import CanonicalDistribution
    >>> phi = CanonicalDistribution(['X', 'Y'], np.array([[1, -1], [-1, 1]]),
                                    np.array([[1], [-1]]), -3)
    >>> phi.variables
    ['X', 'Y']

    >>> phi.K
    array([[1, -1],
           [-1, 1]])

    >>> phi.h
    array([[1],
           [-1]])

    >>> phi.g
    -3
    """
    no_of_var = len(variables)

    if len(h) != no_of_var:
        raise ValueError(
            "Length of h parameter vector must be equal to "
            "the number of variables."
        )

    # Fix: validate K *before* assigning any attributes, so a bad call
    # never leaves behind a partially-initialized object.
    K = np.asarray(K, dtype=float)
    if K.shape != (no_of_var, no_of_var):
        raise ValueError(
            f"The K matrix should be a square matrix with order equal to the number of variables. Got: {K.shape}, Expected: {(no_of_var, no_of_var)}"
        )

    self.variables = variables
    self.h = np.asarray(np.reshape(h, (no_of_var, 1)), dtype=float)
    self.g = g
    self.K = K
|
| 85 |
+
|
| 86 |
+
@property
def pdf(self):
    """Return the (unnormalized) density function exp(-0.5 x'Kx + h'x + g)."""

    def density(*point):
        x = np.array(point)
        quadratic = 0.5 * np.dot(x.T, np.dot(self.K, x))
        linear = np.dot(x, self.h)[0]
        return np.exp(self.g + linear - quadratic)

    return density
|
| 95 |
+
|
| 96 |
+
def assignment(self, *x):
    """
    Returns the probability value of the PDF at the given parameter values.

    Parameters
    ----------
    *x: values of all variables of this distribution,
        collectively defining a point at which the probability value is to be computed.

    Returns
    -------
    float: The probability value at the point.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.continuous import CanonicalDistribution
    >>> phi = CanonicalDistribution(['X', 'Y'], np.array([[1, -1], [-1, 1]]),
    ...                             np.array([[1], [-1]]), -3)
    >>> phi.assignment(0, 0)
    0.049787068367863944
    """
    # Delegates to the closure built by the `pdf` property.
    return self.pdf(*x)
|
| 116 |
+
|
| 117 |
+
def copy(self):
    """
    Makes a copy of the factor.

    Returns
    -------
    CanonicalDistribution object: Copy of the factor.

    Examples
    --------
    >>> from pgmpy.factors.continuous import CanonicalDistribution
    >>> phi = CanonicalDistribution(['X', 'Y'], np.array([[1, -1], [-1, 1]]),
                                    np.array([[1], [-1]]), -3)
    >>> phi2 = phi.copy()
    >>> phi2.variables
    ['X', 'Y']
    >>> phi2.K
    array([[1, -1],
           [-1, 1]])
    >>> phi2.h
    array([[1],
           [-1]])
    >>> phi2.g
    -3
    """
    # Copy the mutable arrays so the clone cannot alias this factor's state.
    return CanonicalDistribution(
        self.variables, self.K.copy(), self.h.copy(), self.g
    )
|
| 166 |
+
|
| 167 |
+
def to_joint_gaussian(self):
    """
    Return an equivalent Joint Gaussian Distribution.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.continuous import CanonicalDistribution
    >>> phi = CanonicalDistribution(['x1', 'x2'], np.array([[3, -2], [-2, 4]]),
                                    np.array([[5], [-1]]), 1)
    >>> jgd = phi.to_joint_gaussian()
    >>> jgd.variables
    ['x1', 'x2']
    >>> jgd.covariance
    array([[ 0.5  ,  0.25 ],
           [ 0.25 ,  0.375]])
    >>> jgd.mean
    array([[ 2.25 ],
           [ 0.875]])
    """
    # Moment-form conversion: Sigma = K^-1 and mu = Sigma * h.
    sigma = np.linalg.inv(self.K)
    mu = np.dot(sigma, self.h)
    return GaussianDistribution(self.variables, mu, sigma)
|
| 193 |
+
|
| 194 |
+
def reduce(self, values, inplace=True):
    """
    Reduces the distribution to the context of the given variable values.

    Let C(X,Y ; K, h, g) be some canonical form over X,Y where,

    k = [[K_XX, K_XY],  ;  h = [[h_X],
         [K_YX, K_YY]]          [h_Y]]

    The formula for the obtained conditional distribution for setting
    Y = y is given by,

    .. math:: K' = K_{XX}
    .. math:: h' = h_X - K_{XY} * y
    .. math:: g' = g + {h^T}_Y * y - 0.5 * y^T * K_{YY} * y


    Parameters
    ----------
    values: list, array-like
        A list of tuples of the form (variable name, variable value).

    inplace: boolean
        If inplace=True it will modify the factor itself, else would return
        a new CanonicalFactor object.

    Returns
    -------
    CanonicalDistribution or None:
        if inplace=True (default) returns None
        if inplace=False returns a new CanonicalDistribution instance.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.continuous import CanonicalDistribution
    >>> phi = CanonicalDistribution(['X1', 'X2', 'X3'],
    ...                             np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
    ...                             np.array([[1], [4], [-1]]), -2)
    >>> phi.variables
    ['X1', 'X2', 'X3']

    >>> phi.K
    array([[ 1., -1.,  0.],
           [-1.,  4., -2.],
           [ 0., -2.,  4.]])

    >>> phi.h
    array([[ 1.],
           [ 4.],
           [-1.]])

    >>> phi.g
    -2

    >>> phi.reduce([('X3', 0.25)])

    >>> phi.variables
    ['X1', 'X2']

    >>> phi.K
    array([[ 1, -1],
           [-1,  4]])

    >>> phi.h
    array([[ 1. ],
           [ 4.5]])

    >>> phi.g
    -2.375
    """
    if not isinstance(values, (list, tuple, np.ndarray)):
        raise TypeError(
            f"variables: Expected type list or array-like, got type {type(values)}"
        )

    if not all([var in self.variables for var, value in values]):
        raise ValueError("Variable not in scope.")

    phi = self if inplace else self.copy()

    var_to_reduce = [var for var, value in values]

    # index_to_keep -> indices of the retained X block
    index_to_keep = [
        self.variables.index(var)
        for var in self.variables
        if var not in var_to_reduce
    ]
    # index_to_reduce -> indices of the evidence Y block
    index_to_reduce = [self.variables.index(var) for var in var_to_reduce]

    # Partition K and h into the X/Y blocks of the formulas above.
    K_i_i = self.K[np.ix_(index_to_keep, index_to_keep)]
    K_i_j = self.K[np.ix_(index_to_keep, index_to_reduce)]
    K_j_j = self.K[np.ix_(index_to_reduce, index_to_reduce)]
    h_i = self.h[index_to_keep]
    h_j = self.h[index_to_reduce]

    # The values for the reduced variables, as a column vector y aligned
    # with index_to_reduce.
    y = np.array([value for var, value in values]).reshape(len(index_to_reduce), 1)

    phi.variables = [self.variables[index] for index in index_to_keep]
    phi.K = K_i_i
    phi.h = h_i - np.dot(K_i_j, y)
    phi.g = (
        self.g
        + (np.dot(h_j.T, y) - (0.5 * np.linalg.multi_dot([y.T, K_j_j, y])))[0][0]
    )

    if not inplace:
        return phi
|
| 303 |
+
|
| 304 |
+
def marginalize(self, variables, inplace=True):
    """
    Modifies the factor with marginalized values.

    Let C(X,Y ; K, h, g) be some canonical form over X,Y where,

    k = [[K_XX, K_XY],  ;  h = [[h_X],
         [K_YX, K_YY]]          [h_Y]]

    In this case, the result of the integration operation is a canonical
    form C(K', h', g') given by,

    .. math:: K' = K_{XX} - K_{XY} * {K^{-1}}_{YY} * K_YX
    .. math:: h' = h_X - K_{XY} * {K^{-1}}_{YY} * h_Y
    .. math:: g' = g + 0.5 * (|Y| * log(2*pi) - log(|K_{YY}|) + {h^T}_Y * {K^{-1}}_{YY} * h_Y)

    Parameters
    ----------
    variables: list or array-like
        List of variables over which to marginalize.

    inplace: boolean
        If inplace=True it will modify the distribution itself,
        else would return a new distribution.

    Returns
    -------
    CanonicalDistribution or None :
        if inplace=True (default) returns None
        if inplace=False return a new CanonicalDistribution instance

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.continuous import CanonicalDistribution
    >>> phi = CanonicalDistribution(['X1', 'X2', 'X3'],
    ...                             np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
    ...                             np.array([[1], [4], [-1]]), -2)
    >>> phi.marginalize(['X3'])

    >>> phi.K
    array([[ 1., -1.],
           [-1.,  3.]])

    >>> phi.h
    array([[ 1. ],
           [ 3.5]])

    >>> phi.g
    -1.6492086473552726
    """
    if not isinstance(variables, (list, tuple, np.ndarray)):
        raise TypeError(
            f"variables: Expected type list or array-like, got type {type(variables)}"
        )

    if not all([var in self.variables for var in variables]):
        raise ValueError("Variable not in scope.")

    phi = self if inplace else self.copy()

    # index_to_keep -> i vector (the retained X block)
    index_to_keep = [
        self.variables.index(var) for var in self.variables if var not in variables
    ]
    # index_to_marginalize -> j vector (the Y block integrated out)
    index_to_marginalize = [self.variables.index(var) for var in variables]

    K_i_i = self.K[np.ix_(index_to_keep, index_to_keep)]
    K_i_j = self.K[np.ix_(index_to_keep, index_to_marginalize)]
    K_j_i = self.K[np.ix_(index_to_marginalize, index_to_keep)]
    K_j_j = self.K[np.ix_(index_to_marginalize, index_to_marginalize)]
    K_j_j_inv = np.linalg.inv(K_j_j)
    h_i = self.h[index_to_keep]
    h_j = self.h[index_to_marginalize]

    phi.variables = [self.variables[index] for index in index_to_keep]

    phi.K = K_i_i - np.linalg.multi_dot([K_i_j, K_j_j_inv, K_j_i])
    phi.h = h_i - np.linalg.multi_dot([K_i_j, K_j_j_inv, h_j])
    # BUG FIX: the quadratic term of g' must use K_{YY}^{-1}, not K_{YY}
    # (Gaussian integral / Koller & Friedman, Sec. 14.2):
    #   g' = g + 0.5*(|Y| log 2pi - log|K_YY| + h_Y^T K_YY^{-1} h_Y)
    # The previous code multiplied by K_j_j, which is inconsistent with the
    # (correct) inverse used for K' and h' above.
    phi.g = (
        self.g
        + 0.5
        * (
            len(variables) * np.log(2 * np.pi)
            - np.log(abs(np.linalg.det(K_j_j)))
            + np.linalg.multi_dot([h_j.T, K_j_j_inv, h_j])
        )[0][0]
    )

    if not inplace:
        return phi
|
| 410 |
+
|
| 411 |
+
def _operate(self, other, operation, inplace=True):
    """
    Gives the CanonicalDistribution operation (product or divide) with
    the other factor.

    The product of two canonical factors over the same scope X is simply:

    C(K1, h1, g1) * C(K2, h2, g2) = C(K1+K2, h1+h2, g1+g2)

    The division of canonical forms is defined analogously:

    C(K1, h1, g1) / C(K2, h2, g2) = C(K1-K2, h1-h2, g1-g2)

    When we have two canonical factors over different scopes X and Y,
    we simply extend the scope of both to make their scopes match and
    then perform the operation of the above equation. The extension of
    the scope is performed by simply adding zero entries to both the K
    matrices and the h vectors.

    Parameters
    ----------
    other: CanonicalDistribution
        The CanonicalDistribution to be multiplied or divided.

    operation: String
        'product' for multiplication operation and
        'divide' for division operation.

    inplace: boolean
        If True, modifies the distribution itself, otherwise returns a new
        CanonicalDistribution instance.

    Returns
    -------
    CanonicalDistribution or None:
        if inplace=True (default) returns None
        if inplace=False returns a new CanonicalDistribution instance.

    Raises
    ------
    TypeError
        If `other` is not a CanonicalDistribution.
    ValueError
        If `operation` is neither 'product' nor 'divide'.

    Example
    -------
    >>> import numpy as np
    >>> from pgmpy.factors.continuous import CanonicalDistribution
    >>> phi1 = CanonicalDistribution(['x1', 'x2', 'x3'],
                                     np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
                                     np.array([[1], [4], [-1]]), -2)
    >>> phi2 = CanonicalDistribution(['x1', 'x2'], np.array([[3, -2], [-2, 4]]),
                                     np.array([[5], [-1]]), 1)

    >>> phi3 = phi1 * phi2
    >>> phi3.K
    array([[ 4., -3.,  0.],
           [-3.,  8., -2.],
           [ 0., -2.,  4.]])
    >>> phi3.h
    array([ 6.,  3., -1.])
    >>> phi3.g
    -1

    >>> phi4 = phi1 / phi2
    >>> phi4.K
    array([[-2.,  1.,  0.],
           [ 1.,  0., -2.],
           [ 0., -2.,  4.]])
    >>> phi4.h
    array([-4.,  5., -1.])
    >>> phi4.g
    -3

    """
    if not isinstance(other, CanonicalDistribution):
        raise TypeError(
            f"CanonicalDistribution object can only be multiplied or divided with an another CanonicalDistribution object. Got {type(other)}, expected CanonicalDistribution."
        )

    # Previously any string other than "product" was silently treated as
    # division; reject unknown operations explicitly instead.
    if operation not in ("product", "divide"):
        raise ValueError(
            f"operation: Expected 'product' or 'divide'. Got {operation!r}."
        )

    phi = self if inplace else self.copy()

    # Union of both scopes, preserving self's variable order first.
    all_vars = self.variables + [
        var for var in other.variables if var not in self.variables
    ]
    no_of_var = len(all_vars)

    self_var_index = [all_vars.index(var) for var in self.variables]
    other_var_index = [all_vars.index(var) for var in other.variables]

    def _extend_K_scope(K, index):
        # Embed K into a zero matrix over the joint scope.
        ext_K = np.zeros([no_of_var, no_of_var])
        ext_K[np.ix_(index, index)] = K
        return ext_K

    def _extend_h_scope(h, index):
        # Embed h into a zero column vector over the joint scope.
        ext_h = np.zeros(no_of_var).reshape(no_of_var, 1)
        ext_h[index] = h
        return ext_h

    phi.variables = all_vars

    # Product adds the canonical parameters; division subtracts them.
    sign = 1 if operation == "product" else -1

    phi.K = _extend_K_scope(self.K, self_var_index) + sign * _extend_K_scope(
        other.K, other_var_index
    )
    phi.h = _extend_h_scope(self.h, self_var_index) + sign * _extend_h_scope(
        other.h, other_var_index
    )
    phi.g = self.g + sign * other.g

    if not inplace:
        return phi
|
| 524 |
+
|
| 525 |
+
def product(self, other, inplace=True):
    """
    Returns the product of two gaussian distributions.

    Delegates to ``_operate`` with ``operation='product'``.

    Parameters
    ----------
    other: CanonicalDistribution
        The distribution to be multiplied with this one.

    inplace: boolean
        If True, modifies the distribution itself, otherwise returns a new
        CanonicalDistribution object.

    Returns
    -------
    CanonicalDistribution or None:
        if inplace=True (default) returns None.
        if inplace=False returns a new CanonicalDistribution instance.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.distributions import GaussianDistribution as GD
    >>> dis1 = GD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
    ...           np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
    >>> dis2 = GD(['x3', 'x4'], [1, 2], [[2, 3], [5, 6]])
    >>> dis3 = dis1.product(dis2, inplace=False)
    >>> dis3.covariance
    array([[ 3.6,  1. , -0.4, -0.6],
           [ 1. ,  2.5, -1. , -1.5],
           [-0.4, -1. ,  1.6,  2.4],
           [-1. , -2.5,  4. ,  4.5]])
    >>> dis3.mean
    array([[ 1.6],
           [-1.5],
           [ 1.6],
           [ 3.5]])
    """
    result = self._operate(other, operation="product", inplace=inplace)
    return result
|
| 564 |
+
|
| 565 |
+
def divide(self, other, inplace=True):
    """
    Returns the division of two gaussian distributions.

    Delegates to ``_operate`` with ``operation='divide'``.

    Parameters
    ----------
    other: CanonicalDistribution
        The distribution this one is divided by.

    inplace: boolean
        If True, modifies the distribution itself, otherwise returns a new
        CanonicalDistribution object.

    Returns
    -------
    CanonicalDistribution or None:
        if inplace=True (default) returns None.
        if inplace=False returns a new CanonicalDistribution instance.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.distributions import GaussianDistribution as GD
    >>> dis1 = GD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
    ...           np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
    >>> dis2 = GD(['x3', 'x4'], [1, 2], [[2, 3], [5, 6]])
    >>> dis3 = dis1.divide(dis2, inplace=False)
    >>> dis3.covariance
    array([[ 3.6,  1. , -0.4, -0.6],
           [ 1. ,  2.5, -1. , -1.5],
           [-0.4, -1. ,  1.6,  2.4],
           [-1. , -2.5,  4. ,  4.5]])
    >>> dis3.mean
    array([[ 1.6],
           [-1.5],
           [ 1.6],
           [ 3.5]])
    """
    result = self._operate(other, operation="divide", inplace=inplace)
    return result
|
| 604 |
+
|
| 605 |
+
def __mul__(self, other):
|
| 606 |
+
return self.product(other, inplace=False)
|
| 607 |
+
|
| 608 |
+
def __rmul__(self, other):
|
| 609 |
+
return self.__mul__(other)
|
| 610 |
+
|
| 611 |
+
def __truediv__(self, other):
|
| 612 |
+
return self.divide(other, inplace=False)
|
| 613 |
+
|
| 614 |
+
# Python 2 compatibility: the `/` operator used `__div__` before true
# division became the default; alias it to the same implementation.
__div__ = __truediv__
|