repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
jochym/Elastic | parcalc/parcalc.py | RemoteCalculator.ParallelCalculate | python | def ParallelCalculate(cls,syslst,properties=['energy'],system_changes=all_changes):
'''
Run a series of calculations in parallel using (implicitely) some
remote machine/cluster. The function returns the list of systems ready
for the extraction of calculated properties.
'''
print('Launching:',end=' ')
sys.stdout.flush()
for n,s in enumerate(syslst):
try :
s.calc.block=False
s.calc.calculate(atoms=s,properties=properties,system_changes=system_changes)
except CalcNotReadyError:
s.calc.block=True
print(n+1, end=' ')
sys.stdout.flush()
print()
print(' Done:', end=' ')
sys.stdout.flush()
for n,s in enumerate(syslst):
s.calc.read_results()
print( n+1, end=' ')
sys.stdout.flush()
print()
return syslst | Run a series of calculations in parallel using (implicitely) some
remote machine/cluster. The function returns the list of systems ready
for the extraction of calculated properties. | train | https://github.com/jochym/Elastic/blob/8daae37d0c48aab8dfb1de2839dab02314817f95/parcalc/parcalc.py#L502-L526 | null | class RemoteCalculator(Calculator):
'''
Remote calculator based on ASE calculator class.
This class is only involved with the machanics of remotly executing
the software and transporting the data. The calculation is
delegated to the actual calculator class.
'''
# Queue system submit command
qsub_tool='qsub'
qstat_tool='qstat'
qsub_cmd='cd %(rdir)s ; %(qsub_tool)s -N %(title)s -l procs=%(procs)d ./run-pw.pbs'
# Remote execution command
remote_exec_cmd='ssh %(user)s@%(host)s "%(command)s"'
# If you cannot mount the data directory into your system it is best
# to use the rsync command to transfer the results back into the system.
# Command for copying the data out to the computing system
copy_out_cmd='rsync -a "%(ldir)s" "%(user)s@%(host)s:%(rdir)s"'
# Command for copying the data in after the calculation
copy_in_cmd='rsync -a "%(user)s@%(host)s:%(rdir)s" "%(ldir)s"'
# Template for the PBS batch job
pbs_template=''
# Command to check the state of the job
pbs_check_cmd='''%(qstat_tool)s -f %(jobid)s |grep job_state |awk '{print $3}' '''
# Access data
host=''
user=''
# Location:
# local working directory
wdir='.'
# Remote working directory relative to the home directory or absolute
rdir='.'
# Repetition timer (seconds) for checkin the state of the job.
job_check_time=15
def __init__(self, restart=None, ignore_bad_restart_file=False, label=None,
atoms=None, calc=None, block=False, **kwargs):
'''Basic calculator implementation.
restart: str
Prefix for restart file. May contain a directory. Default
is None: don't restart.
ignore_bad_restart_file: bool
Ignore broken or missing restart file. By default, it is an
error if the restart file is missing or broken.
label: str
Name used for all files. May contain a directory.
atoms: Atoms object
Optional Atoms object to which the calculator will be
attached. When restarting, atoms will get its positions and
unit-cell updated from file.
Create a remote execution calculator based on actual ASE calculator
calc.
'''
logging.debug("Calc: %s Label: %s" % (calc, label))
Calculator.__init__(self, restart, ignore_bad_restart_file, label, atoms, **kwargs)
logging.debug("Dir: %s Ext: %s" % (self.directory, self.ext))
self.calc=calc
self.jobid=None
self.block=block
def write_pbs_in(self,properties):
with work_dir(self.directory):
with open(os.path.join(self.directory,'run-ase-calc.pbs'),'w') as fh:
fh.write(self.pbs_template % {
'command': self.build_command(self,prop=properties,
params=self.parameters)
})
def build_command(self,prop=['energy'],params={}):
cmd=self.qsub_cmd % {
'qsub_tool': self.qsub_tool,
'qstat_tool': self.qstat_tool,
'title': self.label,
'procs': self.parameters['procs'],
'rdir': os.path.join(self.parameters['rdir'],os.path.split(self.directory)[-1])
}
cmd=self.remote_exec_cmd % {
'command': cmd,
'user': self.parameters['user'],
'host': self.parameters['host']
}
return cmd
def write_input(self, atoms=None, properties=['energy'], system_changes=all_changes):
'''Write input file(s).'''
with work_dir(self.directory):
self.calc.write_input(self, atoms, properties, system_changes)
self.write_pbs_in(properties)
subprocess.call(self.copy_out_cmd % {
'ldir': self.directory,
'rdir': self.parameters['rdir'],
'user': self.parameters['user'],
'host': self.parameters['host']
}, shell=True)
def job_ready(self):
try :
cmd=self.remote_exec_cmd % {
'command': self.pbs_check_cmd % {
'qsub_tool': self.qsub_tool,
'qstat_tool': self.qstat_tool,
'jobid':self.jobid
},
'user': self.parameters['user'],
'host': self.parameters['host']
}
state=subprocess.check_output(cmd, shell=True).split()[-1]
except (subprocess.CalledProcessError, IndexError) :
# Unknown state. We assume it has finished and continue
state='N'
return not (state in ['Q','R'])
def run_calculation(self, atoms=None, properties=['energy'],
system_changes=all_changes):
'''
Internal calculation executor. We cannot use FileIOCalculator
directly since we need to support remote execution.
This calculator is different from others.
It prepares the directory, launches the remote process and
raises the exception to signal that we need to come back for results
when the job is finished.
'''
self.calc.calculate(self, atoms, properties, system_changes)
self.write_input(self.atoms, properties, system_changes)
if self.command is None:
raise RuntimeError('Please configure Remote calculator!')
olddir = os.getcwd()
errorcode=0
try:
os.chdir(self.directory)
output = subprocess.check_output(self.command, shell=True)
self.jobid=output.split()[0]
self.submited=True
#print "Job %s submitted. Waiting for it." % (self.jobid)
# Waiting loop. To be removed.
except subprocess.CalledProcessError as e:
errorcode=e.returncode
finally:
os.chdir(olddir)
if errorcode:
raise RuntimeError('%s returned an error: %d' %
(self.name, errorcode))
self.read_results()
def read_results(self):
"""Read energy, forces, ... from output file(s)."""
if self.submited:
# The job has been submitted. Check the state.
if not self.job_ready() :
if self.block :
while not self.job_ready() :
time.sleep(self.job_check_time)
else :
raise CalcNotReadyError
# Assume the calc finished. Copy the files back.
subprocess.call(self.copy_in_cmd % {
'ldir': self.wdir,
'rdir': os.path.join(self.parameters['rdir'],os.path.split(self.directory)[-1]),
'user': self.parameters['user'],
'host': self.parameters['host']
}, shell=True)
fn=os.path.join(self.directory,'pw.out')
# Read the pan-ultimate line of the output file
try:
ln=open(fn).readlines()[-2]
if ln.find('JOB DONE.')>-1 :
# Job is done we can read the output
r=read_quantumespresso_textoutput(fn)
self.submited=False
self.jobid=None
else :
# Job not ready.
raise CalcNotReadyError
except (IOError, IndexError) :
# Job not ready.
raise CalcNotReadyError
# All is fine - really read the results
self.calc.read_results(self)
@classmethod
|
jochym/Elastic | elastic/cli/elastic.py | gen | python | def gen(ctx, num, lo, hi, size, struct):
'''Generate deformed structures'''
frmt = ctx.parent.params['frmt']
action = ctx.parent.params['action']
cryst = ase.io.read(struct, format=frmt)
fn_tmpl = action
if frmt == 'vasp':
fn_tmpl += '_%03d.POSCAR'
kwargs = {'vasp5': True, 'direct': True}
elif frmt == 'abinit':
fn_tmpl += '_%03d.abinit'
kwargs = {}
if verbose:
from elastic.elastic import get_lattice_type
nr, brav, sg, sgn = get_lattice_type(cryst)
echo('%s lattice (%s): %s' % (brav, sg, cryst.get_chemical_formula()))
if action == 'cij':
echo('Generating {:d} deformations of {:.1f}(%/degs.) per axis'.format(
num, size))
elif action == 'eos':
echo('Generating {:d} deformations from {:.3f} to {:.3f} of V0'.format(
num, lo, hi))
if action == 'cij':
systems = elastic.get_elementary_deformations(cryst, n=num, d=size)
elif action == 'eos':
systems = elastic.scan_volumes(cryst, n=num, lo=lo, hi=hi)
systems.insert(0, cryst)
if verbose:
echo('Writing %d deformation files.' % len(systems))
for n, s in enumerate(systems):
ase.io.write(fn_tmpl % n, s, format=frmt, **kwargs) | Generate deformed structures | train | https://github.com/jochym/Elastic/blob/8daae37d0c48aab8dfb1de2839dab02314817f95/elastic/cli/elastic.py#L83-L117 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 1998-2015 by Paweł T. Jochym <pawel.jochym@ifj.edu.pl>
#
# This file is part of Elastic.
# Elastic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Elastic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Elastic. If not, see <http://www.gnu.org/licenses/>.
'''
The elastic command is a command-line tool exposing the functionality
of elastic library for direct use - without writing any python code.
'''
from __future__ import print_function, absolute_import, division
import click
import ase.io
import elastic
import pkg_resources
from click import echo
verbose = 0
def banner():
if verbose > 1:
echo('Elastic ver. %s\n----------------------' %
pkg_resources.get_distribution("elastic").version)
def set_verbosity(v):
global verbose
verbose = v
def process_calc(fn):
from time import sleep
sleep(1)
@click.group()
@click.option('--vasp', 'frmt', flag_value='vasp',
help='Use VASP formats (default)', default=True)
@click.option('--abinit', 'frmt', flag_value='abinit',
help='Use AbInit formats')
@click.option('--cij', 'action', flag_value='cij',
help='Generate deformations for Cij (default)', default=True)
@click.option('--eos', 'action', flag_value='eos',
help='Generate deformations for Equation of State')
@click.option('-v', '--verbose', count=True, help='Increase verbosity')
@click.version_option()
@click.pass_context
def cli(ctx, frmt, action, verbose):
'''Command-line interface to the elastic library.'''
if verbose:
set_verbosity(verbose)
banner()
@cli.command()
@click.option('-n', '--num', 'num', default=5, type=int,
help='Number of generated deformations per axis (default: 5)')
@click.option('-l', '--lo', 'lo', default=0.98, type=float,
help='Lower relative volume for EOS scan (default: 0.98)')
@click.option('-h', '--hi', 'hi', default=1.02, type=float,
help='Upper relative volume for EOS scan (default: 1.02)')
@click.option('-s', '--size', 'size', default=2.0, type=float,
help='Deformation size for Cij scan (% or deg., default: 2.0)')
@click.argument('struct', type=click.Path(exists=True))
@click.pass_context
@cli.command()
@click.argument('files', type=click.Path(exists=True), nargs=-1)
@click.pass_context
def proc(ctx, files):
'''Process calculated structures'''
def calc_reader(fn, verb):
if verb:
echo('Reading: {:<60s}\r'.format(fn), nl=False, err=True)
return ase.io.read(fn)
action = ctx.parent.params['action']
systems = [calc_reader(calc, verbose) for calc in files]
if verbose :
echo('', err=True)
if action == 'cij':
cij = elastic.get_elastic_tensor(systems[0], systems=systems[1:])
msv = cij[1][3].max()
eps = 1e-4
if verbose:
echo('Cij solution\n'+30*'-')
echo(' Solution rank: {:2d}{}'.format(
cij[1][2],
' (undetermined)' if cij[1][2] < len(cij[0]) else ''))
if cij[1][2] == len(cij[0]):
echo(' Square of residuals: {:7.2g}'.format(cij[1][1]))
echo(' Relative singular values:')
for sv in cij[1][3]/msv:
echo('{:7.4f}{}'.format(
sv, '* ' if (sv) < eps else ' '), nl=False)
echo('\n\nElastic tensor (GPa):')
for dsc in elastic.elastic.get_cij_order(systems[0]):
echo('{: >7s} '.format(dsc), nl=False)
echo('\n'+30*'-')
for c, sv in zip(cij[0], cij[1][3]/msv):
echo('{:7.2f}{}'.format(
c/ase.units.GPa, '* ' if sv < eps else ' '), nl=False)
echo()
elif action == 'eos':
eos = elastic.get_BM_EOS(systems[0], systems=systems[1:])
eos[1] /= ase.units.GPa
if verbose:
echo('# %7s (A^3) %7s (GPa) %7s' % ("V0", "B0", "B0'"))
echo(' %7.2f %7.2f %7.2f' % tuple(eos))
if __name__ == '__main__':
cli()
|
jochym/Elastic | elastic/cli/elastic.py | proc | python | def proc(ctx, files):
'''Process calculated structures'''
def calc_reader(fn, verb):
if verb:
echo('Reading: {:<60s}\r'.format(fn), nl=False, err=True)
return ase.io.read(fn)
action = ctx.parent.params['action']
systems = [calc_reader(calc, verbose) for calc in files]
if verbose :
echo('', err=True)
if action == 'cij':
cij = elastic.get_elastic_tensor(systems[0], systems=systems[1:])
msv = cij[1][3].max()
eps = 1e-4
if verbose:
echo('Cij solution\n'+30*'-')
echo(' Solution rank: {:2d}{}'.format(
cij[1][2],
' (undetermined)' if cij[1][2] < len(cij[0]) else ''))
if cij[1][2] == len(cij[0]):
echo(' Square of residuals: {:7.2g}'.format(cij[1][1]))
echo(' Relative singular values:')
for sv in cij[1][3]/msv:
echo('{:7.4f}{}'.format(
sv, '* ' if (sv) < eps else ' '), nl=False)
echo('\n\nElastic tensor (GPa):')
for dsc in elastic.elastic.get_cij_order(systems[0]):
echo('{: >7s} '.format(dsc), nl=False)
echo('\n'+30*'-')
for c, sv in zip(cij[0], cij[1][3]/msv):
echo('{:7.2f}{}'.format(
c/ase.units.GPa, '* ' if sv < eps else ' '), nl=False)
echo()
elif action == 'eos':
eos = elastic.get_BM_EOS(systems[0], systems=systems[1:])
eos[1] /= ase.units.GPa
if verbose:
echo('# %7s (A^3) %7s (GPa) %7s' % ("V0", "B0", "B0'"))
echo(' %7.2f %7.2f %7.2f' % tuple(eos)) | Process calculated structures | train | https://github.com/jochym/Elastic/blob/8daae37d0c48aab8dfb1de2839dab02314817f95/elastic/cli/elastic.py#L123-L163 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 1998-2015 by Paweł T. Jochym <pawel.jochym@ifj.edu.pl>
#
# This file is part of Elastic.
# Elastic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Elastic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Elastic. If not, see <http://www.gnu.org/licenses/>.
'''
The elastic command is a command-line tool exposing the functionality
of elastic library for direct use - without writing any python code.
'''
from __future__ import print_function, absolute_import, division
import click
import ase.io
import elastic
import pkg_resources
from click import echo
verbose = 0
def banner():
if verbose > 1:
echo('Elastic ver. %s\n----------------------' %
pkg_resources.get_distribution("elastic").version)
def set_verbosity(v):
global verbose
verbose = v
def process_calc(fn):
from time import sleep
sleep(1)
@click.group()
@click.option('--vasp', 'frmt', flag_value='vasp',
help='Use VASP formats (default)', default=True)
@click.option('--abinit', 'frmt', flag_value='abinit',
help='Use AbInit formats')
@click.option('--cij', 'action', flag_value='cij',
help='Generate deformations for Cij (default)', default=True)
@click.option('--eos', 'action', flag_value='eos',
help='Generate deformations for Equation of State')
@click.option('-v', '--verbose', count=True, help='Increase verbosity')
@click.version_option()
@click.pass_context
def cli(ctx, frmt, action, verbose):
'''Command-line interface to the elastic library.'''
if verbose:
set_verbosity(verbose)
banner()
@cli.command()
@click.option('-n', '--num', 'num', default=5, type=int,
help='Number of generated deformations per axis (default: 5)')
@click.option('-l', '--lo', 'lo', default=0.98, type=float,
help='Lower relative volume for EOS scan (default: 0.98)')
@click.option('-h', '--hi', 'hi', default=1.02, type=float,
help='Upper relative volume for EOS scan (default: 1.02)')
@click.option('-s', '--size', 'size', default=2.0, type=float,
help='Deformation size for Cij scan (% or deg., default: 2.0)')
@click.argument('struct', type=click.Path(exists=True))
@click.pass_context
def gen(ctx, num, lo, hi, size, struct):
'''Generate deformed structures'''
frmt = ctx.parent.params['frmt']
action = ctx.parent.params['action']
cryst = ase.io.read(struct, format=frmt)
fn_tmpl = action
if frmt == 'vasp':
fn_tmpl += '_%03d.POSCAR'
kwargs = {'vasp5': True, 'direct': True}
elif frmt == 'abinit':
fn_tmpl += '_%03d.abinit'
kwargs = {}
if verbose:
from elastic.elastic import get_lattice_type
nr, brav, sg, sgn = get_lattice_type(cryst)
echo('%s lattice (%s): %s' % (brav, sg, cryst.get_chemical_formula()))
if action == 'cij':
echo('Generating {:d} deformations of {:.1f}(%/degs.) per axis'.format(
num, size))
elif action == 'eos':
echo('Generating {:d} deformations from {:.3f} to {:.3f} of V0'.format(
num, lo, hi))
if action == 'cij':
systems = elastic.get_elementary_deformations(cryst, n=num, d=size)
elif action == 'eos':
systems = elastic.scan_volumes(cryst, n=num, lo=lo, hi=hi)
systems.insert(0, cryst)
if verbose:
echo('Writing %d deformation files.' % len(systems))
for n, s in enumerate(systems):
ase.io.write(fn_tmpl % n, s, format=frmt, **kwargs)
@cli.command()
@click.argument('files', type=click.Path(exists=True), nargs=-1)
@click.pass_context
if __name__ == '__main__':
cli()
|
release-engineering/productmd | productmd/common.py | parse_nvra | python | def parse_nvra(nvra):
if nvra.endswith(".rpm"):
nvra = nvra[:-4]
result = RPM_NVRA_RE.match(nvra).groupdict()
result["epoch"] = result["epoch"] or 0
result["epoch"] = int(result["epoch"])
return result | Parse RPM N-E:V-R.A string to a dict.
:param nvra: N-E:V-R.A string. This can be a file name or a file path including the '.rpm' suffix.
:type nvra: str
:rtype: dict, with "name", "epoch", "version", "release", and "arch" elements. | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L82-L95 | null | # -*- coding: utf-8 -*-
# pylint: disable=super-on-old-class
# Copyright (C) 2015 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
This module provides base classes and common functions
used in other productmd modules.
"""
import os
import sys
import re
import json
import codecs
import contextlib
import ssl
import warnings
import six
from six.moves.configparser import ConfigParser
VERSION = (1, 2)
__all__ = (
"MetadataBase",
"Header",
"VERSION",
"RELEASE_SHORT_RE",
"RELEASE_VERSION_RE",
"RELEASE_TYPE_RE",
"RELEASE_TYPES",
"parse_nvra",
"is_valid_release_short",
"is_valid_release_version",
"is_valid_release_type",
"split_version",
"get_major_version",
"get_minor_version",
"create_release_id",
"parse_release_id",
)
# HACK: dumped from rpmUtils.arch which is not available on python3
# one less dependency at least :)
RPM_ARCHES = [
"aarch64", "alpha", "alphaev4", "alphaev45", "alphaev5", "alphaev56", "alphaev6", "alphaev67", "alphaev68",
"alphaev7", "alphapca56", "amd64", "arm64", "armhfp", "armv5tejl", "armv5tel", "armv6hl", "armv6l", "armv7hl",
"armv7hnl", "armv7l", "athlon", "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "ppc", "ppc64",
"ppc64iseries", "ppc64le", "ppc64p7", "ppc64pseries", "s390", "s390x", "sh3", "sh4", "sh4a",
"sparc", "sparc64", "sparc64v", "sparcv8", "sparcv9", "sparcv9v", "x86_64",
"src", "nosrc", "noarch",
]
#: Pattern to parse RPM N-E:V-R.A
RPM_NVRA_RE = re.compile(r"^(.*/)?(?P<name>.*)-((?P<epoch>\d+):)?(?P<version>.*)-(?P<release>.*)\.(?P<arch>.*)$")
#: Validation regex for release short name: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_SHORT_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Validation regex for release version: any string or [0-9] separated with dots.
RELEASE_VERSION_RE = re.compile(r"^([^0-9].*|([0-9]+(\.?[0-9]+)*))$")
#: Validation regex for release type: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_TYPE_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Known release types. New values need to be added here if they contain a
# dash, otherwise parsing release IDs will not be reliable.
RELEASE_TYPES = [
"fast",
"ga",
"updates",
"updates-testing",
"eus",
"aus",
"els",
"tus",
"e4s",
]
def is_valid_release_short(short):
"""
Determine if given release short name is valid.
:param short: Release short name
:type short: str
:rtype: bool
"""
match = RELEASE_SHORT_RE.match(short)
return match is not None
def is_valid_release_version(version):
"""
Determine if given release version is valid.
:param version: Release version
:type version: str
:rtype: bool
"""
match = RELEASE_VERSION_RE.match(version)
return match is not None
def is_valid_release_type(release_type):
"""
Determine if given release type is valid.
:param release_type: Release type
:type release_type: str
:rtype: bool
"""
match = RELEASE_TYPE_RE.match(release_type)
return match is not None
def _urlopen(path):
kwargs = {}
if hasattr(ssl, '_create_unverified_context'):
# We only want to use the `context` keyword argument if it has a value.
# Older Python versions (<2.7.9) do not support it. In those cases the
# ssl module will not have the method to create the context.
kwargs['context'] = ssl._create_unverified_context()
return six.moves.urllib.request.urlopen(path, **kwargs)
@contextlib.contextmanager
def _open_file_obj(f, mode="r"):
"""
A context manager that provides access to a file.
:param f: the file to be opened
:type f: a file-like object or path to file
:param mode: how to open the file
:type mode: string
"""
if isinstance(f, six.string_types):
if f.startswith(("http://", "https://")):
file_obj = _urlopen(f)
yield file_obj
file_obj.close()
else:
with open(f, mode) as file_obj:
yield file_obj
else:
yield f
def _file_exists(path):
if path.startswith(("http://", "https://")):
try:
file_obj = _urlopen(path)
file_obj.close()
except six.moves.urllib.error.HTTPError:
return False
return True
return os.path.exists(path)
class MetadataBase(object):
def _assert_type(self, field, expected_types):
value = getattr(self, field)
for atype in expected_types:
if isinstance(value, atype):
return
raise TypeError("%s: Field '%s' has invalid type: %s" % (self.__class__.__name__, field, type(value)))
def _assert_value(self, field, expected_values):
value = getattr(self, field)
if value not in expected_values:
raise ValueError("%s: Field '%s' has invalid value: %s" % (self.__class__.__name__, field, value))
def _assert_not_blank(self, field):
value = getattr(self, field)
if not value:
raise ValueError("%s: Field '%s' must not be blank" % (self.__class__.__name__, field))
def _assert_matches_re(self, field, expected_patterns):
"""
The list of patterns can contain either strings or compiled regular
expressions.
"""
value = getattr(self, field)
for pattern in expected_patterns:
try:
if pattern.match(value):
return
except AttributeError:
# It's not a compiled regex, treat it as string.
if re.match(pattern, value):
return
raise ValueError("%s: Field '%s' has invalid value: %s. It does not match any provided REs: %s"
% (self.__class__.__name__, field, value, expected_patterns))
def validate(self):
"""
Validate attributes by running all self._validate_*() methods.
:raises TypeError: if an attribute has invalid type
:raises ValueError: if an attribute contains invalid value
"""
method_names = sorted([i for i in dir(self) if i.startswith("_validate") and callable(getattr(self, i))])
for method_name in method_names:
method = getattr(self, method_name)
method()
def _get_parser(self):
return {}
def load(self, f):
"""
Load data from a file.
:param f: file-like object or path to file
:type f: file or str
"""
with _open_file_obj(f) as f:
parser = self.parse_file(f)
self.deserialize(parser)
def loads(self, s):
"""
Load data from a string.
:param s: input data
:type s: str
"""
io = six.StringIO()
io.write(s)
io.seek(0)
self.load(io)
self.validate()
def dump(self, f):
"""
Dump data to a file.
:param f: file-like object or path to file
:type f: file or str
"""
self.validate()
with _open_file_obj(f, "w") as f:
parser = self._get_parser()
self.serialize(parser)
self.build_file(parser, f)
def dumps(self):
"""
Dump data to a string.
:rtype: str
"""
io = six.StringIO()
self.dump(io)
io.seek(0)
return io.read()
def parse_file(self, f):
# parse file, return parser or dict with data
if hasattr(f, "seekable"):
if f.seekable():
f.seek(0)
elif hasattr(f, "seek"):
f.seek(0)
if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):
# HTTPResponse needs special handling in py3
reader = codecs.getreader("utf-8")
parser = json.load(reader(f))
else:
parser = json.load(f)
return parser
def build_file(self, parser, f):
# build file from parser or dict with data
json.dump(parser, f, indent=4, sort_keys=True, separators = (",", ": "))
def deserialize(self, parser):
# copy data from parser to instance
raise NotImplementedError
def serialize(self, parser):
# copy data from instance to parser
raise NotImplementedError
class Header(MetadataBase):
"""
This class represents the header used in serialized metadata files.
It consists of a type and a version. The type is meant purely for consumers
of the file to know what they are dealing with without having to check
filename. The version is used by productmd when parsing the file.
"""
def __init__(self, parent, metadata_type):
self._section = "header"
self.parent = parent
self.version = "0.0"
self.metadata_type = metadata_type
def _validate_version(self):
self._assert_type("version", six.string_types)
self._assert_matches_re("version", [r"^\d+\.\d+$"])
@property
def version_tuple(self):
self.validate()
return tuple(split_version(self.version))
def set_current_version(self):
self.version = ".".join([str(i) for i in VERSION])
def serialize(self, parser):
# write *current* version, because format gets converted on save
self.set_current_version()
self.validate()
data = parser
data[self._section] = {}
data[self._section]["type"] = self.metadata_type
data[self._section]["version"] = self.version
def deserialize(self, parser):
data = parser
self.version = data[self._section]["version"]
if self.version_tuple >= (1, 1):
metadata_type = data[self._section]["type"]
if metadata_type != self.metadata_type:
raise ValueError("Invalid metadata type '%s', expected '%s'" % (metadata_type, self.metadata_type))
self.validate()
def split_version(version):
"""
Split version to a list of integers
that can be easily compared.
:param version: Release version
:type version: str
:rtype: [int] or [string]
"""
if re.match("^[^0-9].*", version):
return [version]
return [int(i) for i in version.split(".")]
def get_major_version(version, remove=None):
"""Return major version of a provided version string. Major version is the
first component of the dot-separated version string. For non-version-like
strings this function returns the argument unchanged.
The ``remove`` parameter is deprecated since version 1.18 and will be
removed in the future.
:param version: Version string
:type version: str
:rtype: str
"""
if remove:
warnings.warn("remove argument is deprecated", DeprecationWarning)
version_split = version.split(".")
return version_split[0]
def get_minor_version(version, remove=None):
"""Return minor version of a provided version string. Minor version is the
second component in the dot-separated version string. For non-version-like
strings this function returns ``None``.
The ``remove`` parameter is deprecated since version 1.18 and will be
removed in the future.
:param version: Version string
:type version: str
:rtype: str
"""
if remove:
warnings.warn("remove argument is deprecated", DeprecationWarning)
version_split = version.split(".")
try:
# Assume MAJOR.MINOR.REST...
return version_split[1]
except IndexError:
return None
def create_release_id(short, version, type, bp_short=None, bp_version=None, bp_type=None):
"""
Create release_id from given parts.
:param short: Release short name
:type short: str
:param version: Release version
:type version: str
:param version: Release type
:type version: str
:param bp_short: Base Product short name
:type bp_short: str
:param bp_version: Base Product version
:type bp_version: str
:param bp_type: Base Product type
:rtype: str
"""
if not is_valid_release_short(short):
raise ValueError("Release short name is not valid: %s" % short)
if not is_valid_release_version(version):
raise ValueError("Release short version is not valid: %s" % version)
if not is_valid_release_type(type):
raise ValueError("Release type is not valid: %s" % type)
if type == "ga":
result = "%s-%s" % (short, version)
else:
result = "%s-%s-%s" % (short, version, type)
if bp_short:
result += "@%s" % create_release_id(bp_short, bp_version, bp_type)
return result
def parse_release_id(release_id):
"""
Parse release_id to parts:
{short, version, type}
or
{short, version, type, bp_short, bp_version, bp_type}
:param release_id: Release ID string
:type release_id: str
:rtype: dict
"""
if "@" in release_id:
release, base_product = release_id.split("@")
else:
release = release_id
base_product = None
result = _parse_release_id_part(release)
if base_product is not None:
result.update(_parse_release_id_part(base_product, prefix="bp_"))
return result
def _parse_release_id_part(release_id, prefix=""):
if release_id.count("-") == 1:
# TODO: what if short contains '-'?
short, version = release_id.split("-")
release_type = "ga"
else:
release_type = None
for type_ in RELEASE_TYPES:
# Try to find a known release type.
if release_id.endswith(type_):
release_type = type_
break
if release_type:
# Found, remove it from the parsed string (because there could be a
# dash causing problems).
release_id = release_id[:-len(release_type)]
short, version, release_type_extracted = release_id.rsplit("-", 2)
# If known release type is found, use it; otherwise fall back to the
# one we parsed out.
release_type = release_type or release_type_extracted
result = {
"short": short,
"version": version,
"type": release_type,
}
result = dict([("%s%s" % (prefix, key), value) for key, value in result.items()])
return result
class SortedDict(dict):
def __iter__(self):
for key in self.keys():
yield key
def iterkeys(self):
for key in self.keys():
yield key
def itervalues(self):
for key in self.keys():
yield self[key]
def keys(self):
return sorted(dict.keys(self), reverse=False)
def iteritems(self):
for key in self.keys():
yield (key, self[key])
def items(self):
return self.iteritems()
class SortedConfigParser(ConfigParser):
def __init__(self, *args, **kwargs):
if sys.version_info[0] == 2:
if sys.version_info[:2] >= (2, 6):
# SafeConfigParser(dict_type=) supported in 2.6+
kwargs["dict_type"] = SortedDict
ConfigParser.__init__(self, *args, **kwargs)
else:
kwargs["dict_type"] = SortedDict
super(SortedConfigParser, self).__init__(*args, **kwargs)
self.seen = set()
def optionxform(self, optionstr):
# don't convert options to lower()
return optionstr
def option_lookup(self, section_option_list, default=None):
for section, option in section_option_list:
if self.has_option(section, option):
return self.get(section, option)
return default
def read_file(self, *args, **kwargs):
if sys.version_info[0] == 2:
return self.readfp(*args, **kwargs)
return super(SortedConfigParser, self).read_file(*args, **kwargs)
|
release-engineering/productmd | productmd/common.py | _open_file_obj | python | def _open_file_obj(f, mode="r"):
if isinstance(f, six.string_types):
if f.startswith(("http://", "https://")):
file_obj = _urlopen(f)
yield file_obj
file_obj.close()
else:
with open(f, mode) as file_obj:
yield file_obj
else:
yield f | A context manager that provides access to a file.
:param f: the file to be opened
:type f: a file-like object or path to file
:param mode: how to open the file
:type mode: string | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L172-L190 | null | # -*- coding: utf-8 -*-
# pylint: disable=super-on-old-class
# Copyright (C) 2015 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
This module provides base classes and common functions
used in other productmd modules.
"""
import os
import sys
import re
import json
import codecs
import contextlib
import ssl
import warnings
import six
from six.moves.configparser import ConfigParser
VERSION = (1, 2)
__all__ = (
"MetadataBase",
"Header",
"VERSION",
"RELEASE_SHORT_RE",
"RELEASE_VERSION_RE",
"RELEASE_TYPE_RE",
"RELEASE_TYPES",
"parse_nvra",
"is_valid_release_short",
"is_valid_release_version",
"is_valid_release_type",
"split_version",
"get_major_version",
"get_minor_version",
"create_release_id",
"parse_release_id",
)
# HACK: dumped from rpmUtils.arch which is not available on python3
# one less dependency at least :)
RPM_ARCHES = [
"aarch64", "alpha", "alphaev4", "alphaev45", "alphaev5", "alphaev56", "alphaev6", "alphaev67", "alphaev68",
"alphaev7", "alphapca56", "amd64", "arm64", "armhfp", "armv5tejl", "armv5tel", "armv6hl", "armv6l", "armv7hl",
"armv7hnl", "armv7l", "athlon", "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "ppc", "ppc64",
"ppc64iseries", "ppc64le", "ppc64p7", "ppc64pseries", "s390", "s390x", "sh3", "sh4", "sh4a",
"sparc", "sparc64", "sparc64v", "sparcv8", "sparcv9", "sparcv9v", "x86_64",
"src", "nosrc", "noarch",
]
#: Pattern to parse RPM N-E:V-R.A
RPM_NVRA_RE = re.compile(r"^(.*/)?(?P<name>.*)-((?P<epoch>\d+):)?(?P<version>.*)-(?P<release>.*)\.(?P<arch>.*)$")
def parse_nvra(nvra):
"""
Parse RPM N-E:V-R.A string to a dict.
:param nvra: N-E:V-R.A string. This can be a file name or a file path including the '.rpm' suffix.
:type nvra: str
:rtype: dict, with "name", "epoch", "version", "release", and "arch" elements.
"""
if nvra.endswith(".rpm"):
nvra = nvra[:-4]
result = RPM_NVRA_RE.match(nvra).groupdict()
result["epoch"] = result["epoch"] or 0
result["epoch"] = int(result["epoch"])
return result
#: Validation regex for release short name: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_SHORT_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Validation regex for release version: any string or [0-9] separated with dots.
RELEASE_VERSION_RE = re.compile(r"^([^0-9].*|([0-9]+(\.?[0-9]+)*))$")
#: Validation regex for release type: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_TYPE_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Known release types. New values need to be added here if they contain a
# dash, otherwise parsing release IDs will not be reliable.
RELEASE_TYPES = [
"fast",
"ga",
"updates",
"updates-testing",
"eus",
"aus",
"els",
"tus",
"e4s",
]
def is_valid_release_short(short):
"""
Determine if given release short name is valid.
:param short: Release short name
:type short: str
:rtype: bool
"""
match = RELEASE_SHORT_RE.match(short)
return match is not None
def is_valid_release_version(version):
"""
Determine if given release version is valid.
:param version: Release version
:type version: str
:rtype: bool
"""
match = RELEASE_VERSION_RE.match(version)
return match is not None
def is_valid_release_type(release_type):
"""
Determine if given release type is valid.
:param release_type: Release type
:type release_type: str
:rtype: bool
"""
match = RELEASE_TYPE_RE.match(release_type)
return match is not None
def _urlopen(path):
kwargs = {}
if hasattr(ssl, '_create_unverified_context'):
# We only want to use the `context` keyword argument if it has a value.
# Older Python versions (<2.7.9) do not support it. In those cases the
# ssl module will not have the method to create the context.
kwargs['context'] = ssl._create_unverified_context()
return six.moves.urllib.request.urlopen(path, **kwargs)
@contextlib.contextmanager
def _file_exists(path):
if path.startswith(("http://", "https://")):
try:
file_obj = _urlopen(path)
file_obj.close()
except six.moves.urllib.error.HTTPError:
return False
return True
return os.path.exists(path)
class MetadataBase(object):
def _assert_type(self, field, expected_types):
value = getattr(self, field)
for atype in expected_types:
if isinstance(value, atype):
return
raise TypeError("%s: Field '%s' has invalid type: %s" % (self.__class__.__name__, field, type(value)))
def _assert_value(self, field, expected_values):
value = getattr(self, field)
if value not in expected_values:
raise ValueError("%s: Field '%s' has invalid value: %s" % (self.__class__.__name__, field, value))
def _assert_not_blank(self, field):
value = getattr(self, field)
if not value:
raise ValueError("%s: Field '%s' must not be blank" % (self.__class__.__name__, field))
def _assert_matches_re(self, field, expected_patterns):
"""
The list of patterns can contain either strings or compiled regular
expressions.
"""
value = getattr(self, field)
for pattern in expected_patterns:
try:
if pattern.match(value):
return
except AttributeError:
# It's not a compiled regex, treat it as string.
if re.match(pattern, value):
return
raise ValueError("%s: Field '%s' has invalid value: %s. It does not match any provided REs: %s"
% (self.__class__.__name__, field, value, expected_patterns))
def validate(self):
"""
Validate attributes by running all self._validate_*() methods.
:raises TypeError: if an attribute has invalid type
:raises ValueError: if an attribute contains invalid value
"""
method_names = sorted([i for i in dir(self) if i.startswith("_validate") and callable(getattr(self, i))])
for method_name in method_names:
method = getattr(self, method_name)
method()
def _get_parser(self):
return {}
def load(self, f):
"""
Load data from a file.
:param f: file-like object or path to file
:type f: file or str
"""
with _open_file_obj(f) as f:
parser = self.parse_file(f)
self.deserialize(parser)
def loads(self, s):
"""
Load data from a string.
:param s: input data
:type s: str
"""
io = six.StringIO()
io.write(s)
io.seek(0)
self.load(io)
self.validate()
def dump(self, f):
"""
Dump data to a file.
:param f: file-like object or path to file
:type f: file or str
"""
self.validate()
with _open_file_obj(f, "w") as f:
parser = self._get_parser()
self.serialize(parser)
self.build_file(parser, f)
def dumps(self):
"""
Dump data to a string.
:rtype: str
"""
io = six.StringIO()
self.dump(io)
io.seek(0)
return io.read()
def parse_file(self, f):
# parse file, return parser or dict with data
if hasattr(f, "seekable"):
if f.seekable():
f.seek(0)
elif hasattr(f, "seek"):
f.seek(0)
if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):
# HTTPResponse needs special handling in py3
reader = codecs.getreader("utf-8")
parser = json.load(reader(f))
else:
parser = json.load(f)
return parser
def build_file(self, parser, f):
# build file from parser or dict with data
json.dump(parser, f, indent=4, sort_keys=True, separators = (",", ": "))
def deserialize(self, parser):
# copy data from parser to instance
raise NotImplementedError
def serialize(self, parser):
# copy data from instance to parser
raise NotImplementedError
class Header(MetadataBase):
"""
This class represents the header used in serialized metadata files.
It consists of a type and a version. The type is meant purely for consumers
of the file to know what they are dealing with without having to check
filename. The version is used by productmd when parsing the file.
"""
def __init__(self, parent, metadata_type):
self._section = "header"
self.parent = parent
self.version = "0.0"
self.metadata_type = metadata_type
def _validate_version(self):
self._assert_type("version", six.string_types)
self._assert_matches_re("version", [r"^\d+\.\d+$"])
@property
def version_tuple(self):
self.validate()
return tuple(split_version(self.version))
def set_current_version(self):
self.version = ".".join([str(i) for i in VERSION])
def serialize(self, parser):
# write *current* version, because format gets converted on save
self.set_current_version()
self.validate()
data = parser
data[self._section] = {}
data[self._section]["type"] = self.metadata_type
data[self._section]["version"] = self.version
def deserialize(self, parser):
data = parser
self.version = data[self._section]["version"]
if self.version_tuple >= (1, 1):
metadata_type = data[self._section]["type"]
if metadata_type != self.metadata_type:
raise ValueError("Invalid metadata type '%s', expected '%s'" % (metadata_type, self.metadata_type))
self.validate()
def split_version(version):
    """Split *version* into a list of easily comparable components.

    A dotted numeric version becomes a list of ints; a version that does
    not start with a digit is returned wrapped in a single-element list.

    :param version: Release version
    :type version: str
    :rtype: [int] or [string]
    """
    if re.match("^[^0-9].*", version):
        # Non-numeric version (e.g. "rawhide") -- keep it whole.
        return [version]
    return list(map(int, version.split(".")))
def get_major_version(version, remove=None):
"""Return major version of a provided version string. Major version is the
first component of the dot-separated version string. For non-version-like
strings this function returns the argument unchanged.
The ``remove`` parameter is deprecated since version 1.18 and will be
removed in the future.
:param version: Version string
:type version: str
:rtype: str
"""
if remove:
warnings.warn("remove argument is deprecated", DeprecationWarning)
version_split = version.split(".")
return version_split[0]
def get_minor_version(version, remove=None):
"""Return minor version of a provided version string. Minor version is the
second component in the dot-separated version string. For non-version-like
strings this function returns ``None``.
The ``remove`` parameter is deprecated since version 1.18 and will be
removed in the future.
:param version: Version string
:type version: str
:rtype: str
"""
if remove:
warnings.warn("remove argument is deprecated", DeprecationWarning)
version_split = version.split(".")
try:
# Assume MAJOR.MINOR.REST...
return version_split[1]
except IndexError:
return None
def create_release_id(short, version, type, bp_short=None, bp_version=None, bp_type=None):
"""
Create release_id from given parts.
:param short: Release short name
:type short: str
:param version: Release version
:type version: str
:param version: Release type
:type version: str
:param bp_short: Base Product short name
:type bp_short: str
:param bp_version: Base Product version
:type bp_version: str
:param bp_type: Base Product type
:rtype: str
"""
if not is_valid_release_short(short):
raise ValueError("Release short name is not valid: %s" % short)
if not is_valid_release_version(version):
raise ValueError("Release short version is not valid: %s" % version)
if not is_valid_release_type(type):
raise ValueError("Release type is not valid: %s" % type)
if type == "ga":
result = "%s-%s" % (short, version)
else:
result = "%s-%s-%s" % (short, version, type)
if bp_short:
result += "@%s" % create_release_id(bp_short, bp_version, bp_type)
return result
def parse_release_id(release_id):
"""
Parse release_id to parts:
{short, version, type}
or
{short, version, type, bp_short, bp_version, bp_type}
:param release_id: Release ID string
:type release_id: str
:rtype: dict
"""
if "@" in release_id:
release, base_product = release_id.split("@")
else:
release = release_id
base_product = None
result = _parse_release_id_part(release)
if base_product is not None:
result.update(_parse_release_id_part(base_product, prefix="bp_"))
return result
def _parse_release_id_part(release_id, prefix=""):
if release_id.count("-") == 1:
# TODO: what if short contains '-'?
short, version = release_id.split("-")
release_type = "ga"
else:
release_type = None
for type_ in RELEASE_TYPES:
# Try to find a known release type.
if release_id.endswith(type_):
release_type = type_
break
if release_type:
# Found, remove it from the parsed string (because there could be a
# dash causing problems).
release_id = release_id[:-len(release_type)]
short, version, release_type_extracted = release_id.rsplit("-", 2)
# If known release type is found, use it; otherwise fall back to the
# one we parsed out.
release_type = release_type or release_type_extracted
result = {
"short": short,
"version": version,
"type": release_type,
}
result = dict([("%s%s" % (prefix, key), value) for key, value in result.items()])
return result
class SortedDict(dict):
def __iter__(self):
for key in self.keys():
yield key
def iterkeys(self):
for key in self.keys():
yield key
def itervalues(self):
for key in self.keys():
yield self[key]
def keys(self):
return sorted(dict.keys(self), reverse=False)
def iteritems(self):
for key in self.keys():
yield (key, self[key])
def items(self):
return self.iteritems()
class SortedConfigParser(ConfigParser):
def __init__(self, *args, **kwargs):
if sys.version_info[0] == 2:
if sys.version_info[:2] >= (2, 6):
# SafeConfigParser(dict_type=) supported in 2.6+
kwargs["dict_type"] = SortedDict
ConfigParser.__init__(self, *args, **kwargs)
else:
kwargs["dict_type"] = SortedDict
super(SortedConfigParser, self).__init__(*args, **kwargs)
self.seen = set()
def optionxform(self, optionstr):
# don't convert options to lower()
return optionstr
def option_lookup(self, section_option_list, default=None):
for section, option in section_option_list:
if self.has_option(section, option):
return self.get(section, option)
return default
def read_file(self, *args, **kwargs):
if sys.version_info[0] == 2:
return self.readfp(*args, **kwargs)
return super(SortedConfigParser, self).read_file(*args, **kwargs)
|
release-engineering/productmd | productmd/common.py | split_version | python | def split_version(version):
if re.match("^[^0-9].*", version):
return [version]
return [int(i) for i in version.split(".")] | Split version to a list of integers
that can be easily compared.
:param version: Release version
:type version: str
:rtype: [int] or [string] | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L376-L387 | null | # -*- coding: utf-8 -*-
# pylint: disable=super-on-old-class
# Copyright (C) 2015 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
This module provides base classes and common functions
used in other productmd modules.
"""
import os
import sys
import re
import json
import codecs
import contextlib
import ssl
import warnings
import six
from six.moves.configparser import ConfigParser
VERSION = (1, 2)
__all__ = (
"MetadataBase",
"Header",
"VERSION",
"RELEASE_SHORT_RE",
"RELEASE_VERSION_RE",
"RELEASE_TYPE_RE",
"RELEASE_TYPES",
"parse_nvra",
"is_valid_release_short",
"is_valid_release_version",
"is_valid_release_type",
"split_version",
"get_major_version",
"get_minor_version",
"create_release_id",
"parse_release_id",
)
# HACK: dumped from rpmUtils.arch which is not available on python3
# one less dependency at least :)
RPM_ARCHES = [
"aarch64", "alpha", "alphaev4", "alphaev45", "alphaev5", "alphaev56", "alphaev6", "alphaev67", "alphaev68",
"alphaev7", "alphapca56", "amd64", "arm64", "armhfp", "armv5tejl", "armv5tel", "armv6hl", "armv6l", "armv7hl",
"armv7hnl", "armv7l", "athlon", "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "ppc", "ppc64",
"ppc64iseries", "ppc64le", "ppc64p7", "ppc64pseries", "s390", "s390x", "sh3", "sh4", "sh4a",
"sparc", "sparc64", "sparc64v", "sparcv8", "sparcv9", "sparcv9v", "x86_64",
"src", "nosrc", "noarch",
]
#: Pattern to parse RPM N-E:V-R.A
RPM_NVRA_RE = re.compile(r"^(.*/)?(?P<name>.*)-((?P<epoch>\d+):)?(?P<version>.*)-(?P<release>.*)\.(?P<arch>.*)$")
def parse_nvra(nvra):
    """
    Parse an RPM N-E:V-R.A string into its components.

    :param nvra: N-E:V-R.A string. This can be a file name or a file path including the '.rpm' suffix.
    :type nvra: str
    :rtype: dict, with "name", "epoch", "version", "release", and "arch" elements.
    """
    stripped = nvra[:-4] if nvra.endswith(".rpm") else nvra
    parts = RPM_NVRA_RE.match(stripped).groupdict()
    # A missing epoch is normalized to the integer 0.
    parts["epoch"] = int(parts["epoch"] or 0)
    return parts
#: Validation regex for release short name: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_SHORT_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Validation regex for release version: any string or [0-9] separated with dots.
RELEASE_VERSION_RE = re.compile(r"^([^0-9].*|([0-9]+(\.?[0-9]+)*))$")
#: Validation regex for release type: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_TYPE_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Known release types. New values need to be added here if they contain a
# dash, otherwise parsing release IDs will not be reliable.
RELEASE_TYPES = [
"fast",
"ga",
"updates",
"updates-testing",
"eus",
"aus",
"els",
"tus",
"e4s",
]
def is_valid_release_short(short):
    """
    Determine if given release short name is valid.

    :param short: Release short name
    :type short: str
    :rtype: bool
    """
    return RELEASE_SHORT_RE.match(short) is not None
def is_valid_release_version(version):
    """
    Determine if given release version is valid.

    :param version: Release version
    :type version: str
    :rtype: bool
    """
    return RELEASE_VERSION_RE.match(version) is not None
def is_valid_release_type(release_type):
    """
    Determine if given release type is valid.

    :param release_type: Release type
    :type release_type: str
    :rtype: bool
    """
    return RELEASE_TYPE_RE.match(release_type) is not None
def _urlopen(path):
    # Open *path* (an http:// or https:// URL) and return the response object.
    # NOTE(review): certificate verification is deliberately disabled below,
    # presumably so metadata can be fetched from hosts with self-signed
    # certificates -- confirm before tightening.
    kwargs = {}
    if hasattr(ssl, '_create_unverified_context'):
        # We only want to use the `context` keyword argument if it has a value.
        # Older Python versions (<2.7.9) do not support it. In those cases the
        # ssl module will not have the method to create the context.
        kwargs['context'] = ssl._create_unverified_context()
    return six.moves.urllib.request.urlopen(path, **kwargs)
@contextlib.contextmanager
def _open_file_obj(f, mode="r"):
    """
    A context manager that provides access to a file.

    :param f: the file to be opened
    :type f: a file-like object or path to file
    :param mode: how to open the file
    :type mode: string
    """
    if isinstance(f, six.string_types):
        if f.startswith(("http://", "https://")):
            file_obj = _urlopen(f)
            try:
                yield file_obj
            finally:
                # Fix: close the response even when the caller's block raises.
                # Previously the close() was unconditional code after the
                # yield, so an exception propagating through the context
                # manager leaked the open connection (the local-file branch
                # already used `with` and did not have this problem).
                file_obj.close()
        else:
            with open(f, mode) as file_obj:
                yield file_obj
    else:
        # Already a file-like object: hand it through without closing it;
        # the caller owns its lifetime.
        yield f
def _file_exists(path):
    # Return True if *path* (a local path or an http(s) URL) exists.
    if path.startswith(("http://", "https://")):
        try:
            # For remote paths, existence == the server answering the request.
            file_obj = _urlopen(path)
            file_obj.close()
        except six.moves.urllib.error.HTTPError:
            # Error status (404, 403, ...) -> treat as missing.
            # NOTE(review): network-level failures (URLError) still propagate.
            return False
        return True
    return os.path.exists(path)
class MetadataBase(object):
    """Common base class for productmd metadata objects.

    Provides field-validation helpers (the ``_assert_*`` methods), JSON
    load/dump plumbing, and the ``serialize``/``deserialize`` hooks that
    concrete subclasses must implement.
    """
    def _assert_type(self, field, expected_types):
        # Raise TypeError unless getattr(self, field) is an instance of at
        # least one of expected_types.
        value = getattr(self, field)
        for atype in expected_types:
            if isinstance(value, atype):
                return
        raise TypeError("%s: Field '%s' has invalid type: %s" % (self.__class__.__name__, field, type(value)))
    def _assert_value(self, field, expected_values):
        # Raise ValueError unless the field's value is one of expected_values.
        value = getattr(self, field)
        if value not in expected_values:
            raise ValueError("%s: Field '%s' has invalid value: %s" % (self.__class__.__name__, field, value))
    def _assert_not_blank(self, field):
        # Raise ValueError when the field's value is falsy (empty, None, 0).
        value = getattr(self, field)
        if not value:
            raise ValueError("%s: Field '%s' must not be blank" % (self.__class__.__name__, field))
    def _assert_matches_re(self, field, expected_patterns):
        """
        Raise ValueError unless the field's value matches a pattern.
        The list of patterns can contain either strings or compiled regular
        expressions.
        """
        value = getattr(self, field)
        for pattern in expected_patterns:
            try:
                if pattern.match(value):
                    return
            except AttributeError:
                # It's not a compiled regex, treat it as string.
                if re.match(pattern, value):
                    return
        raise ValueError("%s: Field '%s' has invalid value: %s. It does not match any provided REs: %s"
                         % (self.__class__.__name__, field, value, expected_patterns))
    def validate(self):
        """
        Validate attributes by running all self._validate_*() methods.

        :raises TypeError: if an attribute has invalid type
        :raises ValueError: if an attribute contains invalid value
        """
        # Discover validators by naming convention so subclasses only need to
        # define _validate_<field> methods.
        method_names = sorted([i for i in dir(self) if i.startswith("_validate") and callable(getattr(self, i))])
        for method_name in method_names:
            method = getattr(self, method_name)
            method()
    def _get_parser(self):
        # Hook: return a fresh, empty parser object (a plain dict for JSON).
        return {}
    def load(self, f):
        """
        Load data from a file.

        :param f: file-like object or path to file
        :type f: file or str
        """
        with _open_file_obj(f) as f:
            parser = self.parse_file(f)
            self.deserialize(parser)
    def loads(self, s):
        """
        Load data from a string.

        :param s: input data
        :type s: str
        """
        io = six.StringIO()
        io.write(s)
        io.seek(0)
        self.load(io)
        self.validate()
    def dump(self, f):
        """
        Dump data to a file.

        :param f: file-like object or path to file
        :type f: file or str
        """
        # Validate before writing so invalid metadata is never serialized.
        self.validate()
        with _open_file_obj(f, "w") as f:
            parser = self._get_parser()
            self.serialize(parser)
            self.build_file(parser, f)
    def dumps(self):
        """
        Dump data to a string.

        :rtype: str
        """
        io = six.StringIO()
        self.dump(io)
        io.seek(0)
        return io.read()
    def parse_file(self, f):
        # parse file, return parser or dict with data
        # Rewind the stream first (when possible) so repeated loads read from
        # the start.
        if hasattr(f, "seekable"):
            if f.seekable():
                f.seek(0)
        elif hasattr(f, "seek"):
            f.seek(0)
        if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):
            # HTTPResponse needs special handling in py3: it yields bytes,
            # so wrap it in a UTF-8 decoding reader for json.load.
            reader = codecs.getreader("utf-8")
            parser = json.load(reader(f))
        else:
            parser = json.load(f)
        return parser
    def build_file(self, parser, f):
        # build file from parser or dict with data
        json.dump(parser, f, indent=4, sort_keys=True, separators = (",", ": "))
    def deserialize(self, parser):
        # copy data from parser to instance; subclasses must implement.
        raise NotImplementedError
    def serialize(self, parser):
        # copy data from instance to parser; subclasses must implement.
        raise NotImplementedError
class Header(MetadataBase):
    """
    This class represents the header used in serialized metadata files.

    It consists of a type and a version. The type is meant purely for consumers
    of the file to know what they are dealing with without having to check
    filename. The version is used by productmd when parsing the file.
    """
    def __init__(self, parent, metadata_type):
        # Key under which the header is stored in the serialized document.
        self._section = "header"
        self.parent = parent
        # Format version as a "MAJOR.MINOR" string; replaced on load/save.
        self.version = "0.0"
        # Expected metadata type string; checked against the file on load.
        self.metadata_type = metadata_type
    def _validate_version(self):
        self._assert_type("version", six.string_types)
        self._assert_matches_re("version", [r"^\d+\.\d+$"])
    @property
    def version_tuple(self):
        """Version as a tuple of ints, e.g. (1, 2), for easy comparison."""
        self.validate()
        return tuple(split_version(self.version))
    def set_current_version(self):
        # Stamp the header with this library's own format version (VERSION).
        self.version = ".".join([str(i) for i in VERSION])
    def serialize(self, parser):
        # write *current* version, because format gets converted on save
        self.set_current_version()
        self.validate()
        data = parser
        data[self._section] = {}
        data[self._section]["type"] = self.metadata_type
        data[self._section]["version"] = self.version
    def deserialize(self, parser):
        data = parser
        self.version = data[self._section]["version"]
        if self.version_tuple >= (1, 1):
            # The "type" field only exists in format 1.1 and newer.
            metadata_type = data[self._section]["type"]
            if metadata_type != self.metadata_type:
                raise ValueError("Invalid metadata type '%s', expected '%s'" % (metadata_type, self.metadata_type))
        self.validate()
def get_major_version(version, remove=None):
    """Return the major version of a provided version string.

    The major version is the first component of the dot-separated version
    string; non-version-like strings are returned unchanged.

    The ``remove`` parameter is deprecated since version 1.18 and will be
    removed in the future.

    :param version: Version string
    :type version: str
    :rtype: str
    """
    if remove:
        warnings.warn("remove argument is deprecated", DeprecationWarning)
    return version.split(".")[0]
def get_minor_version(version, remove=None):
    """Return the minor version of a provided version string.

    The minor version is the second component in the dot-separated version
    string; when there is no such component (including non-version-like
    strings), ``None`` is returned.

    The ``remove`` parameter is deprecated since version 1.18 and will be
    removed in the future.

    :param version: Version string
    :type version: str
    :rtype: str
    """
    if remove:
        warnings.warn("remove argument is deprecated", DeprecationWarning)
    components = version.split(".")
    # Assume MAJOR.MINOR.REST...; a single-component version has no minor.
    return components[1] if len(components) > 1 else None
def create_release_id(short, version, type, bp_short=None, bp_version=None, bp_type=None):
    """
    Create release_id from given parts.

    :param short: Release short name
    :type short: str
    :param version: Release version
    :type version: str
    :param type: Release type
    :type type: str
    :param bp_short: Base Product short name
    :type bp_short: str
    :param bp_version: Base Product version
    :type bp_version: str
    :param bp_type: Base Product type
    :rtype: str
    """
    # NOTE: the parameter named `type` shadows the builtin; it is kept for
    # backward compatibility with existing callers.
    if not is_valid_release_short(short):
        raise ValueError("Release short name is not valid: %s" % short)
    if not is_valid_release_version(version):
        raise ValueError("Release short version is not valid: %s" % version)
    if not is_valid_release_type(type):
        raise ValueError("Release type is not valid: %s" % type)
    if type == "ga":
        # GA releases omit the type suffix.
        result = "%s-%s" % (short, version)
    else:
        result = "%s-%s-%s" % (short, version, type)
    if bp_short:
        # Layered products append "@<base product release id>".
        result += "@%s" % create_release_id(bp_short, bp_version, bp_type)
    return result
def parse_release_id(release_id):
    """
    Parse release_id to parts:
    {short, version, type}
    or
    {short, version, type, bp_short, bp_version, bp_type}

    :param release_id: Release ID string
    :type release_id: str
    :rtype: dict
    """
    if "@" in release_id:
        # Layered product: "<release>@<base product>".
        release, base_product = release_id.split("@")
        result = _parse_release_id_part(release)
        result.update(_parse_release_id_part(base_product, prefix="bp_"))
        return result
    return _parse_release_id_part(release_id)
def _parse_release_id_part(release_id, prefix=""):
    # Parse one "<short>-<version>[-<type>]" chunk of a release id into a dict
    # with (optionally prefixed) "short", "version" and "type" keys.
    if release_id.count("-") == 1:
        # Exactly one dash: "<short>-<version>" with an implicit GA type.
        # TODO: what if short contains '-'?
        short, version = release_id.split("-")
        release_type = "ga"
    else:
        release_type = None
        for type_ in RELEASE_TYPES:
            # Try to find a known release type.
            if release_id.endswith(type_):
                release_type = type_
                break
        if release_type:
            # Found, remove it from the parsed string (because there could be a
            # dash causing problems).
            # The trailing dash left behind ends up as an empty third field of
            # the rsplit below and is discarded by the `or` fallback.
            release_id = release_id[:-len(release_type)]
        short, version, release_type_extracted = release_id.rsplit("-", 2)
        # If known release type is found, use it; otherwise fall back to the
        # one we parsed out.
        release_type = release_type or release_type_extracted
    result = {
        "short": short,
        "version": version,
        "type": release_type,
    }
    # Apply the prefix (e.g. "bp_" for base product fields) to every key.
    result = dict([("%s%s" % (prefix, key), value) for key, value in result.items()])
    return result
class SortedDict(dict):
    """A dict whose iteration helpers walk the keys in sorted order.

    ``keys()`` returns a sorted list; the ``iter*`` helpers (kept for py2
    compatibility) and ``items()`` yield entries in the same sorted order.
    """
    def keys(self):
        return sorted(dict.keys(self))

    def __iter__(self):
        return iter(self.keys())

    def iterkeys(self):
        return iter(self.keys())

    def itervalues(self):
        return (self[key] for key in self.keys())

    def iteritems(self):
        return ((key, self[key]) for key in self.keys())

    def items(self):
        return self.iteritems()
class SortedConfigParser(ConfigParser):
    # ConfigParser variant that stores sections/options in SortedDict (so they
    # are written out in sorted order) and preserves option-name case.
    def __init__(self, *args, **kwargs):
        if sys.version_info[0] == 2:
            if sys.version_info[:2] >= (2, 6):
                # SafeConfigParser(dict_type=) supported in 2.6+
                kwargs["dict_type"] = SortedDict
            ConfigParser.__init__(self, *args, **kwargs)
        else:
            kwargs["dict_type"] = SortedDict
            super(SortedConfigParser, self).__init__(*args, **kwargs)
        # NOTE(review): `seen` is never read within this class -- presumably
        # maintained for callers; confirm before removing.
        self.seen = set()
    def optionxform(self, optionstr):
        # don't convert options to lower()
        return optionstr
    def option_lookup(self, section_option_list, default=None):
        # Return the value of the first (section, option) pair that exists,
        # or `default` when none of them do.
        for section, option in section_option_list:
            if self.has_option(section, option):
                return self.get(section, option)
        return default
    def read_file(self, *args, **kwargs):
        # Python 2's ConfigParser only provides readfp(); delegate there.
        if sys.version_info[0] == 2:
            return self.readfp(*args, **kwargs)
        return super(SortedConfigParser, self).read_file(*args, **kwargs)
|
release-engineering/productmd | productmd/common.py | get_major_version | python | def get_major_version(version, remove=None):
if remove:
warnings.warn("remove argument is deprecated", DeprecationWarning)
version_split = version.split(".")
return version_split[0] | Return major version of a provided version string. Major version is the
first component of the dot-separated version string. For non-version-like
strings this function returns the argument unchanged.
The ``remove`` parameter is deprecated since version 1.18 and will be
removed in the future.
:param version: Version string
:type version: str
:rtype: str | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L390-L405 | null | # -*- coding: utf-8 -*-
# pylint: disable=super-on-old-class
# Copyright (C) 2015 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
This module provides base classes and common functions
used in other productmd modules.
"""
import os
import sys
import re
import json
import codecs
import contextlib
import ssl
import warnings
import six
from six.moves.configparser import ConfigParser
VERSION = (1, 2)
__all__ = (
"MetadataBase",
"Header",
"VERSION",
"RELEASE_SHORT_RE",
"RELEASE_VERSION_RE",
"RELEASE_TYPE_RE",
"RELEASE_TYPES",
"parse_nvra",
"is_valid_release_short",
"is_valid_release_version",
"is_valid_release_type",
"split_version",
"get_major_version",
"get_minor_version",
"create_release_id",
"parse_release_id",
)
# HACK: dumped from rpmUtils.arch which is not available on python3
# one less dependency at least :)
RPM_ARCHES = [
"aarch64", "alpha", "alphaev4", "alphaev45", "alphaev5", "alphaev56", "alphaev6", "alphaev67", "alphaev68",
"alphaev7", "alphapca56", "amd64", "arm64", "armhfp", "armv5tejl", "armv5tel", "armv6hl", "armv6l", "armv7hl",
"armv7hnl", "armv7l", "athlon", "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "ppc", "ppc64",
"ppc64iseries", "ppc64le", "ppc64p7", "ppc64pseries", "s390", "s390x", "sh3", "sh4", "sh4a",
"sparc", "sparc64", "sparc64v", "sparcv8", "sparcv9", "sparcv9v", "x86_64",
"src", "nosrc", "noarch",
]
#: Pattern to parse RPM N-E:V-R.A
RPM_NVRA_RE = re.compile(r"^(.*/)?(?P<name>.*)-((?P<epoch>\d+):)?(?P<version>.*)-(?P<release>.*)\.(?P<arch>.*)$")
def parse_nvra(nvra):
"""
Parse RPM N-E:V-R.A string to a dict.
:param nvra: N-E:V-R.A string. This can be a file name or a file path including the '.rpm' suffix.
:type nvra: str
:rtype: dict, with "name", "epoch", "version", "release", and "arch" elements.
"""
if nvra.endswith(".rpm"):
nvra = nvra[:-4]
result = RPM_NVRA_RE.match(nvra).groupdict()
result["epoch"] = result["epoch"] or 0
result["epoch"] = int(result["epoch"])
return result
#: Validation regex for release short name: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_SHORT_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Validation regex for release version: any string or [0-9] separated with dots.
RELEASE_VERSION_RE = re.compile(r"^([^0-9].*|([0-9]+(\.?[0-9]+)*))$")
#: Validation regex for release type: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_TYPE_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Known release types. New values need to be added here if they contain a
# dash, otherwise parsing release IDs will not be reliable.
RELEASE_TYPES = [
"fast",
"ga",
"updates",
"updates-testing",
"eus",
"aus",
"els",
"tus",
"e4s",
]
def is_valid_release_short(short):
"""
Determine if given release short name is valid.
:param short: Release short name
:type short: str
:rtype: bool
"""
match = RELEASE_SHORT_RE.match(short)
return match is not None
def is_valid_release_version(version):
"""
Determine if given release version is valid.
:param version: Release version
:type version: str
:rtype: bool
"""
match = RELEASE_VERSION_RE.match(version)
return match is not None
def is_valid_release_type(release_type):
"""
Determine if given release type is valid.
:param release_type: Release type
:type release_type: str
:rtype: bool
"""
match = RELEASE_TYPE_RE.match(release_type)
return match is not None
def _urlopen(path):
kwargs = {}
if hasattr(ssl, '_create_unverified_context'):
# We only want to use the `context` keyword argument if it has a value.
# Older Python versions (<2.7.9) do not support it. In those cases the
# ssl module will not have the method to create the context.
kwargs['context'] = ssl._create_unverified_context()
return six.moves.urllib.request.urlopen(path, **kwargs)
@contextlib.contextmanager
def _open_file_obj(f, mode="r"):
"""
A context manager that provides access to a file.
:param f: the file to be opened
:type f: a file-like object or path to file
:param mode: how to open the file
:type mode: string
"""
if isinstance(f, six.string_types):
if f.startswith(("http://", "https://")):
file_obj = _urlopen(f)
yield file_obj
file_obj.close()
else:
with open(f, mode) as file_obj:
yield file_obj
else:
yield f
def _file_exists(path):
if path.startswith(("http://", "https://")):
try:
file_obj = _urlopen(path)
file_obj.close()
except six.moves.urllib.error.HTTPError:
return False
return True
return os.path.exists(path)
class MetadataBase(object):
def _assert_type(self, field, expected_types):
value = getattr(self, field)
for atype in expected_types:
if isinstance(value, atype):
return
raise TypeError("%s: Field '%s' has invalid type: %s" % (self.__class__.__name__, field, type(value)))
def _assert_value(self, field, expected_values):
value = getattr(self, field)
if value not in expected_values:
raise ValueError("%s: Field '%s' has invalid value: %s" % (self.__class__.__name__, field, value))
def _assert_not_blank(self, field):
value = getattr(self, field)
if not value:
raise ValueError("%s: Field '%s' must not be blank" % (self.__class__.__name__, field))
def _assert_matches_re(self, field, expected_patterns):
"""
The list of patterns can contain either strings or compiled regular
expressions.
"""
value = getattr(self, field)
for pattern in expected_patterns:
try:
if pattern.match(value):
return
except AttributeError:
# It's not a compiled regex, treat it as string.
if re.match(pattern, value):
return
raise ValueError("%s: Field '%s' has invalid value: %s. It does not match any provided REs: %s"
% (self.__class__.__name__, field, value, expected_patterns))
def validate(self):
"""
Validate attributes by running all self._validate_*() methods.
:raises TypeError: if an attribute has invalid type
:raises ValueError: if an attribute contains invalid value
"""
method_names = sorted([i for i in dir(self) if i.startswith("_validate") and callable(getattr(self, i))])
for method_name in method_names:
method = getattr(self, method_name)
method()
def _get_parser(self):
return {}
def load(self, f):
"""
Load data from a file.
:param f: file-like object or path to file
:type f: file or str
"""
with _open_file_obj(f) as f:
parser = self.parse_file(f)
self.deserialize(parser)
def loads(self, s):
"""
Load data from a string.
:param s: input data
:type s: str
"""
io = six.StringIO()
io.write(s)
io.seek(0)
self.load(io)
self.validate()
def dump(self, f):
"""
Dump data to a file.
:param f: file-like object or path to file
:type f: file or str
"""
self.validate()
with _open_file_obj(f, "w") as f:
parser = self._get_parser()
self.serialize(parser)
self.build_file(parser, f)
def dumps(self):
"""
Dump data to a string.
:rtype: str
"""
io = six.StringIO()
self.dump(io)
io.seek(0)
return io.read()
def parse_file(self, f):
# parse file, return parser or dict with data
if hasattr(f, "seekable"):
if f.seekable():
f.seek(0)
elif hasattr(f, "seek"):
f.seek(0)
if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):
# HTTPResponse needs special handling in py3
reader = codecs.getreader("utf-8")
parser = json.load(reader(f))
else:
parser = json.load(f)
return parser
def build_file(self, parser, f):
# build file from parser or dict with data
json.dump(parser, f, indent=4, sort_keys=True, separators = (",", ": "))
def deserialize(self, parser):
# copy data from parser to instance
raise NotImplementedError
def serialize(self, parser):
# copy data from instance to parser
raise NotImplementedError
class Header(MetadataBase):
"""
This class represents the header used in serialized metadata files.
It consists of a type and a version. The type is meant purely for consumers
of the file to know what they are dealing with without having to check
filename. The version is used by productmd when parsing the file.
"""
def __init__(self, parent, metadata_type):
self._section = "header"
self.parent = parent
self.version = "0.0"
self.metadata_type = metadata_type
def _validate_version(self):
self._assert_type("version", six.string_types)
self._assert_matches_re("version", [r"^\d+\.\d+$"])
@property
def version_tuple(self):
self.validate()
return tuple(split_version(self.version))
def set_current_version(self):
self.version = ".".join([str(i) for i in VERSION])
def serialize(self, parser):
# write *current* version, because format gets converted on save
self.set_current_version()
self.validate()
data = parser
data[self._section] = {}
data[self._section]["type"] = self.metadata_type
data[self._section]["version"] = self.version
def deserialize(self, parser):
data = parser
self.version = data[self._section]["version"]
if self.version_tuple >= (1, 1):
metadata_type = data[self._section]["type"]
if metadata_type != self.metadata_type:
raise ValueError("Invalid metadata type '%s', expected '%s'" % (metadata_type, self.metadata_type))
self.validate()
def split_version(version):
"""
Split version to a list of integers
that can be easily compared.
:param version: Release version
:type version: str
:rtype: [int] or [string]
"""
if re.match("^[^0-9].*", version):
return [version]
return [int(i) for i in version.split(".")]
def get_minor_version(version, remove=None):
"""Return minor version of a provided version string. Minor version is the
second component in the dot-separated version string. For non-version-like
strings this function returns ``None``.
The ``remove`` parameter is deprecated since version 1.18 and will be
removed in the future.
:param version: Version string
:type version: str
:rtype: str
"""
if remove:
warnings.warn("remove argument is deprecated", DeprecationWarning)
version_split = version.split(".")
try:
# Assume MAJOR.MINOR.REST...
return version_split[1]
except IndexError:
return None
def create_release_id(short, version, type, bp_short=None, bp_version=None, bp_type=None):
"""
Create release_id from given parts.
:param short: Release short name
:type short: str
:param version: Release version
:type version: str
:param version: Release type
:type version: str
:param bp_short: Base Product short name
:type bp_short: str
:param bp_version: Base Product version
:type bp_version: str
:param bp_type: Base Product type
:rtype: str
"""
if not is_valid_release_short(short):
raise ValueError("Release short name is not valid: %s" % short)
if not is_valid_release_version(version):
raise ValueError("Release short version is not valid: %s" % version)
if not is_valid_release_type(type):
raise ValueError("Release type is not valid: %s" % type)
if type == "ga":
result = "%s-%s" % (short, version)
else:
result = "%s-%s-%s" % (short, version, type)
if bp_short:
result += "@%s" % create_release_id(bp_short, bp_version, bp_type)
return result
def parse_release_id(release_id):
"""
Parse release_id to parts:
{short, version, type}
or
{short, version, type, bp_short, bp_version, bp_type}
:param release_id: Release ID string
:type release_id: str
:rtype: dict
"""
if "@" in release_id:
release, base_product = release_id.split("@")
else:
release = release_id
base_product = None
result = _parse_release_id_part(release)
if base_product is not None:
result.update(_parse_release_id_part(base_product, prefix="bp_"))
return result
def _parse_release_id_part(release_id, prefix=""):
if release_id.count("-") == 1:
# TODO: what if short contains '-'?
short, version = release_id.split("-")
release_type = "ga"
else:
release_type = None
for type_ in RELEASE_TYPES:
# Try to find a known release type.
if release_id.endswith(type_):
release_type = type_
break
if release_type:
# Found, remove it from the parsed string (because there could be a
# dash causing problems).
release_id = release_id[:-len(release_type)]
short, version, release_type_extracted = release_id.rsplit("-", 2)
# If known release type is found, use it; otherwise fall back to the
# one we parsed out.
release_type = release_type or release_type_extracted
result = {
"short": short,
"version": version,
"type": release_type,
}
result = dict([("%s%s" % (prefix, key), value) for key, value in result.items()])
return result
class SortedDict(dict):
def __iter__(self):
for key in self.keys():
yield key
def iterkeys(self):
for key in self.keys():
yield key
def itervalues(self):
for key in self.keys():
yield self[key]
def keys(self):
return sorted(dict.keys(self), reverse=False)
def iteritems(self):
for key in self.keys():
yield (key, self[key])
def items(self):
return self.iteritems()
class SortedConfigParser(ConfigParser):
def __init__(self, *args, **kwargs):
if sys.version_info[0] == 2:
if sys.version_info[:2] >= (2, 6):
# SafeConfigParser(dict_type=) supported in 2.6+
kwargs["dict_type"] = SortedDict
ConfigParser.__init__(self, *args, **kwargs)
else:
kwargs["dict_type"] = SortedDict
super(SortedConfigParser, self).__init__(*args, **kwargs)
self.seen = set()
def optionxform(self, optionstr):
# don't convert options to lower()
return optionstr
def option_lookup(self, section_option_list, default=None):
for section, option in section_option_list:
if self.has_option(section, option):
return self.get(section, option)
return default
def read_file(self, *args, **kwargs):
if sys.version_info[0] == 2:
return self.readfp(*args, **kwargs)
return super(SortedConfigParser, self).read_file(*args, **kwargs)
|
release-engineering/productmd | productmd/common.py | get_minor_version | python | def get_minor_version(version, remove=None):
if remove:
warnings.warn("remove argument is deprecated", DeprecationWarning)
version_split = version.split(".")
try:
# Assume MAJOR.MINOR.REST...
return version_split[1]
except IndexError:
return None | Return minor version of a provided version string. Minor version is the
second component in the dot-separated version string. For non-version-like
strings this function returns ``None``.
The ``remove`` parameter is deprecated since version 1.18 and will be
removed in the future.
:param version: Version string
:type version: str
:rtype: str | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L408-L427 | null | # -*- coding: utf-8 -*-
# pylint: disable=super-on-old-class
# Copyright (C) 2015 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
This module provides base classes and common functions
used in other productmd modules.
"""
import os
import sys
import re
import json
import codecs
import contextlib
import ssl
import warnings
import six
from six.moves.configparser import ConfigParser
VERSION = (1, 2)
__all__ = (
"MetadataBase",
"Header",
"VERSION",
"RELEASE_SHORT_RE",
"RELEASE_VERSION_RE",
"RELEASE_TYPE_RE",
"RELEASE_TYPES",
"parse_nvra",
"is_valid_release_short",
"is_valid_release_version",
"is_valid_release_type",
"split_version",
"get_major_version",
"get_minor_version",
"create_release_id",
"parse_release_id",
)
# HACK: dumped from rpmUtils.arch which is not available on python3
# one less dependency at least :)
RPM_ARCHES = [
"aarch64", "alpha", "alphaev4", "alphaev45", "alphaev5", "alphaev56", "alphaev6", "alphaev67", "alphaev68",
"alphaev7", "alphapca56", "amd64", "arm64", "armhfp", "armv5tejl", "armv5tel", "armv6hl", "armv6l", "armv7hl",
"armv7hnl", "armv7l", "athlon", "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "ppc", "ppc64",
"ppc64iseries", "ppc64le", "ppc64p7", "ppc64pseries", "s390", "s390x", "sh3", "sh4", "sh4a",
"sparc", "sparc64", "sparc64v", "sparcv8", "sparcv9", "sparcv9v", "x86_64",
"src", "nosrc", "noarch",
]
#: Pattern to parse RPM N-E:V-R.A
RPM_NVRA_RE = re.compile(r"^(.*/)?(?P<name>.*)-((?P<epoch>\d+):)?(?P<version>.*)-(?P<release>.*)\.(?P<arch>.*)$")
def parse_nvra(nvra):
"""
Parse RPM N-E:V-R.A string to a dict.
:param nvra: N-E:V-R.A string. This can be a file name or a file path including the '.rpm' suffix.
:type nvra: str
:rtype: dict, with "name", "epoch", "version", "release", and "arch" elements.
"""
if nvra.endswith(".rpm"):
nvra = nvra[:-4]
result = RPM_NVRA_RE.match(nvra).groupdict()
result["epoch"] = result["epoch"] or 0
result["epoch"] = int(result["epoch"])
return result
#: Validation regex for release short name: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_SHORT_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Validation regex for release version: any string or [0-9] separated with dots.
RELEASE_VERSION_RE = re.compile(r"^([^0-9].*|([0-9]+(\.?[0-9]+)*))$")
#: Validation regex for release type: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_TYPE_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Known release types. New values need to be added here if they contain a
# dash, otherwise parsing release IDs will not be reliable.
RELEASE_TYPES = [
"fast",
"ga",
"updates",
"updates-testing",
"eus",
"aus",
"els",
"tus",
"e4s",
]
def is_valid_release_short(short):
"""
Determine if given release short name is valid.
:param short: Release short name
:type short: str
:rtype: bool
"""
match = RELEASE_SHORT_RE.match(short)
return match is not None
def is_valid_release_version(version):
"""
Determine if given release version is valid.
:param version: Release version
:type version: str
:rtype: bool
"""
match = RELEASE_VERSION_RE.match(version)
return match is not None
def is_valid_release_type(release_type):
"""
Determine if given release type is valid.
:param release_type: Release type
:type release_type: str
:rtype: bool
"""
match = RELEASE_TYPE_RE.match(release_type)
return match is not None
def _urlopen(path):
kwargs = {}
if hasattr(ssl, '_create_unverified_context'):
# We only want to use the `context` keyword argument if it has a value.
# Older Python versions (<2.7.9) do not support it. In those cases the
# ssl module will not have the method to create the context.
kwargs['context'] = ssl._create_unverified_context()
return six.moves.urllib.request.urlopen(path, **kwargs)
@contextlib.contextmanager
def _open_file_obj(f, mode="r"):
"""
A context manager that provides access to a file.
:param f: the file to be opened
:type f: a file-like object or path to file
:param mode: how to open the file
:type mode: string
"""
if isinstance(f, six.string_types):
if f.startswith(("http://", "https://")):
file_obj = _urlopen(f)
yield file_obj
file_obj.close()
else:
with open(f, mode) as file_obj:
yield file_obj
else:
yield f
def _file_exists(path):
if path.startswith(("http://", "https://")):
try:
file_obj = _urlopen(path)
file_obj.close()
except six.moves.urllib.error.HTTPError:
return False
return True
return os.path.exists(path)
class MetadataBase(object):
def _assert_type(self, field, expected_types):
value = getattr(self, field)
for atype in expected_types:
if isinstance(value, atype):
return
raise TypeError("%s: Field '%s' has invalid type: %s" % (self.__class__.__name__, field, type(value)))
def _assert_value(self, field, expected_values):
value = getattr(self, field)
if value not in expected_values:
raise ValueError("%s: Field '%s' has invalid value: %s" % (self.__class__.__name__, field, value))
def _assert_not_blank(self, field):
value = getattr(self, field)
if not value:
raise ValueError("%s: Field '%s' must not be blank" % (self.__class__.__name__, field))
def _assert_matches_re(self, field, expected_patterns):
"""
The list of patterns can contain either strings or compiled regular
expressions.
"""
value = getattr(self, field)
for pattern in expected_patterns:
try:
if pattern.match(value):
return
except AttributeError:
# It's not a compiled regex, treat it as string.
if re.match(pattern, value):
return
raise ValueError("%s: Field '%s' has invalid value: %s. It does not match any provided REs: %s"
% (self.__class__.__name__, field, value, expected_patterns))
def validate(self):
"""
Validate attributes by running all self._validate_*() methods.
:raises TypeError: if an attribute has invalid type
:raises ValueError: if an attribute contains invalid value
"""
method_names = sorted([i for i in dir(self) if i.startswith("_validate") and callable(getattr(self, i))])
for method_name in method_names:
method = getattr(self, method_name)
method()
def _get_parser(self):
return {}
def load(self, f):
"""
Load data from a file.
:param f: file-like object or path to file
:type f: file or str
"""
with _open_file_obj(f) as f:
parser = self.parse_file(f)
self.deserialize(parser)
def loads(self, s):
"""
Load data from a string.
:param s: input data
:type s: str
"""
io = six.StringIO()
io.write(s)
io.seek(0)
self.load(io)
self.validate()
def dump(self, f):
"""
Dump data to a file.
:param f: file-like object or path to file
:type f: file or str
"""
self.validate()
with _open_file_obj(f, "w") as f:
parser = self._get_parser()
self.serialize(parser)
self.build_file(parser, f)
def dumps(self):
"""
Dump data to a string.
:rtype: str
"""
io = six.StringIO()
self.dump(io)
io.seek(0)
return io.read()
def parse_file(self, f):
# parse file, return parser or dict with data
if hasattr(f, "seekable"):
if f.seekable():
f.seek(0)
elif hasattr(f, "seek"):
f.seek(0)
if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):
# HTTPResponse needs special handling in py3
reader = codecs.getreader("utf-8")
parser = json.load(reader(f))
else:
parser = json.load(f)
return parser
def build_file(self, parser, f):
# build file from parser or dict with data
json.dump(parser, f, indent=4, sort_keys=True, separators = (",", ": "))
def deserialize(self, parser):
# copy data from parser to instance
raise NotImplementedError
def serialize(self, parser):
# copy data from instance to parser
raise NotImplementedError
class Header(MetadataBase):
"""
This class represents the header used in serialized metadata files.
It consists of a type and a version. The type is meant purely for consumers
of the file to know what they are dealing with without having to check
filename. The version is used by productmd when parsing the file.
"""
def __init__(self, parent, metadata_type):
self._section = "header"
self.parent = parent
self.version = "0.0"
self.metadata_type = metadata_type
def _validate_version(self):
self._assert_type("version", six.string_types)
self._assert_matches_re("version", [r"^\d+\.\d+$"])
@property
def version_tuple(self):
self.validate()
return tuple(split_version(self.version))
def set_current_version(self):
self.version = ".".join([str(i) for i in VERSION])
def serialize(self, parser):
# write *current* version, because format gets converted on save
self.set_current_version()
self.validate()
data = parser
data[self._section] = {}
data[self._section]["type"] = self.metadata_type
data[self._section]["version"] = self.version
def deserialize(self, parser):
data = parser
self.version = data[self._section]["version"]
if self.version_tuple >= (1, 1):
metadata_type = data[self._section]["type"]
if metadata_type != self.metadata_type:
raise ValueError("Invalid metadata type '%s', expected '%s'" % (metadata_type, self.metadata_type))
self.validate()
def split_version(version):
"""
Split version to a list of integers
that can be easily compared.
:param version: Release version
:type version: str
:rtype: [int] or [string]
"""
if re.match("^[^0-9].*", version):
return [version]
return [int(i) for i in version.split(".")]
def get_major_version(version, remove=None):
"""Return major version of a provided version string. Major version is the
first component of the dot-separated version string. For non-version-like
strings this function returns the argument unchanged.
The ``remove`` parameter is deprecated since version 1.18 and will be
removed in the future.
:param version: Version string
:type version: str
:rtype: str
"""
if remove:
warnings.warn("remove argument is deprecated", DeprecationWarning)
version_split = version.split(".")
return version_split[0]
def create_release_id(short, version, type, bp_short=None, bp_version=None, bp_type=None):
"""
Create release_id from given parts.
:param short: Release short name
:type short: str
:param version: Release version
:type version: str
:param version: Release type
:type version: str
:param bp_short: Base Product short name
:type bp_short: str
:param bp_version: Base Product version
:type bp_version: str
:param bp_type: Base Product type
:rtype: str
"""
if not is_valid_release_short(short):
raise ValueError("Release short name is not valid: %s" % short)
if not is_valid_release_version(version):
raise ValueError("Release short version is not valid: %s" % version)
if not is_valid_release_type(type):
raise ValueError("Release type is not valid: %s" % type)
if type == "ga":
result = "%s-%s" % (short, version)
else:
result = "%s-%s-%s" % (short, version, type)
if bp_short:
result += "@%s" % create_release_id(bp_short, bp_version, bp_type)
return result
def parse_release_id(release_id):
"""
Parse release_id to parts:
{short, version, type}
or
{short, version, type, bp_short, bp_version, bp_type}
:param release_id: Release ID string
:type release_id: str
:rtype: dict
"""
if "@" in release_id:
release, base_product = release_id.split("@")
else:
release = release_id
base_product = None
result = _parse_release_id_part(release)
if base_product is not None:
result.update(_parse_release_id_part(base_product, prefix="bp_"))
return result
def _parse_release_id_part(release_id, prefix=""):
if release_id.count("-") == 1:
# TODO: what if short contains '-'?
short, version = release_id.split("-")
release_type = "ga"
else:
release_type = None
for type_ in RELEASE_TYPES:
# Try to find a known release type.
if release_id.endswith(type_):
release_type = type_
break
if release_type:
# Found, remove it from the parsed string (because there could be a
# dash causing problems).
release_id = release_id[:-len(release_type)]
short, version, release_type_extracted = release_id.rsplit("-", 2)
# If known release type is found, use it; otherwise fall back to the
# one we parsed out.
release_type = release_type or release_type_extracted
result = {
"short": short,
"version": version,
"type": release_type,
}
result = dict([("%s%s" % (prefix, key), value) for key, value in result.items()])
return result
class SortedDict(dict):
def __iter__(self):
for key in self.keys():
yield key
def iterkeys(self):
for key in self.keys():
yield key
def itervalues(self):
for key in self.keys():
yield self[key]
def keys(self):
return sorted(dict.keys(self), reverse=False)
def iteritems(self):
for key in self.keys():
yield (key, self[key])
def items(self):
return self.iteritems()
class SortedConfigParser(ConfigParser):
def __init__(self, *args, **kwargs):
if sys.version_info[0] == 2:
if sys.version_info[:2] >= (2, 6):
# SafeConfigParser(dict_type=) supported in 2.6+
kwargs["dict_type"] = SortedDict
ConfigParser.__init__(self, *args, **kwargs)
else:
kwargs["dict_type"] = SortedDict
super(SortedConfigParser, self).__init__(*args, **kwargs)
self.seen = set()
def optionxform(self, optionstr):
# don't convert options to lower()
return optionstr
def option_lookup(self, section_option_list, default=None):
for section, option in section_option_list:
if self.has_option(section, option):
return self.get(section, option)
return default
def read_file(self, *args, **kwargs):
if sys.version_info[0] == 2:
return self.readfp(*args, **kwargs)
return super(SortedConfigParser, self).read_file(*args, **kwargs)
|
release-engineering/productmd | productmd/common.py | create_release_id | python | def create_release_id(short, version, type, bp_short=None, bp_version=None, bp_type=None):
if not is_valid_release_short(short):
raise ValueError("Release short name is not valid: %s" % short)
if not is_valid_release_version(version):
raise ValueError("Release short version is not valid: %s" % version)
if not is_valid_release_type(type):
raise ValueError("Release type is not valid: %s" % type)
if type == "ga":
result = "%s-%s" % (short, version)
else:
result = "%s-%s-%s" % (short, version, type)
if bp_short:
result += "@%s" % create_release_id(bp_short, bp_version, bp_type)
return result | Create release_id from given parts.
:param short: Release short name
:type short: str
:param version: Release version
:type version: str
:param version: Release type
:type version: str
:param bp_short: Base Product short name
:type bp_short: str
:param bp_version: Base Product version
:type bp_version: str
:param bp_type: Base Product type
:rtype: str | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L430-L462 | [
"def is_valid_release_short(short):\n \"\"\"\n Determine if given release short name is valid.\n\n :param short: Release short name\n :type short: str\n :rtype: bool\n \"\"\"\n match = RELEASE_SHORT_RE.match(short)\n return match is not None\n",
"def is_valid_release_version(version):\n ... | # -*- coding: utf-8 -*-
# pylint: disable=super-on-old-class
# Copyright (C) 2015 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
This module provides base classes and common functions
used in other productmd modules.
"""
import os
import sys
import re
import json
import codecs
import contextlib
import ssl
import warnings
import six
from six.moves.configparser import ConfigParser
VERSION = (1, 2)
__all__ = (
"MetadataBase",
"Header",
"VERSION",
"RELEASE_SHORT_RE",
"RELEASE_VERSION_RE",
"RELEASE_TYPE_RE",
"RELEASE_TYPES",
"parse_nvra",
"is_valid_release_short",
"is_valid_release_version",
"is_valid_release_type",
"split_version",
"get_major_version",
"get_minor_version",
"create_release_id",
"parse_release_id",
)
# HACK: dumped from rpmUtils.arch which is not available on python3
# one less dependency at least :)
RPM_ARCHES = [
"aarch64", "alpha", "alphaev4", "alphaev45", "alphaev5", "alphaev56", "alphaev6", "alphaev67", "alphaev68",
"alphaev7", "alphapca56", "amd64", "arm64", "armhfp", "armv5tejl", "armv5tel", "armv6hl", "armv6l", "armv7hl",
"armv7hnl", "armv7l", "athlon", "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "ppc", "ppc64",
"ppc64iseries", "ppc64le", "ppc64p7", "ppc64pseries", "s390", "s390x", "sh3", "sh4", "sh4a",
"sparc", "sparc64", "sparc64v", "sparcv8", "sparcv9", "sparcv9v", "x86_64",
"src", "nosrc", "noarch",
]
#: Pattern to parse RPM N-E:V-R.A
RPM_NVRA_RE = re.compile(r"^(.*/)?(?P<name>.*)-((?P<epoch>\d+):)?(?P<version>.*)-(?P<release>.*)\.(?P<arch>.*)$")
def parse_nvra(nvra):
"""
Parse RPM N-E:V-R.A string to a dict.
:param nvra: N-E:V-R.A string. This can be a file name or a file path including the '.rpm' suffix.
:type nvra: str
:rtype: dict, with "name", "epoch", "version", "release", and "arch" elements.
"""
if nvra.endswith(".rpm"):
nvra = nvra[:-4]
result = RPM_NVRA_RE.match(nvra).groupdict()
result["epoch"] = result["epoch"] or 0
result["epoch"] = int(result["epoch"])
return result
#: Validation regex for release short name: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_SHORT_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Validation regex for release version: any string or [0-9] separated with dots.
RELEASE_VERSION_RE = re.compile(r"^([^0-9].*|([0-9]+(\.?[0-9]+)*))$")
#: Validation regex for release type: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_TYPE_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Known release types. New values need to be added here if they contain a
# dash, otherwise parsing release IDs will not be reliable.
RELEASE_TYPES = [
"fast",
"ga",
"updates",
"updates-testing",
"eus",
"aus",
"els",
"tus",
"e4s",
]
def is_valid_release_short(short):
"""
Determine if given release short name is valid.
:param short: Release short name
:type short: str
:rtype: bool
"""
match = RELEASE_SHORT_RE.match(short)
return match is not None
def is_valid_release_version(version):
"""
Determine if given release version is valid.
:param version: Release version
:type version: str
:rtype: bool
"""
match = RELEASE_VERSION_RE.match(version)
return match is not None
def is_valid_release_type(release_type):
"""
Determine if given release type is valid.
:param release_type: Release type
:type release_type: str
:rtype: bool
"""
match = RELEASE_TYPE_RE.match(release_type)
return match is not None
def _urlopen(path):
kwargs = {}
if hasattr(ssl, '_create_unverified_context'):
# We only want to use the `context` keyword argument if it has a value.
# Older Python versions (<2.7.9) do not support it. In those cases the
# ssl module will not have the method to create the context.
kwargs['context'] = ssl._create_unverified_context()
return six.moves.urllib.request.urlopen(path, **kwargs)
@contextlib.contextmanager
def _open_file_obj(f, mode="r"):
    """
    A context manager that provides access to a file.

    :param f: the file to be opened
    :type f: a file-like object or path to file
    :param mode: how to open the file (ignored for URLs and file objects)
    :type mode: string
    """
    if isinstance(f, six.string_types):
        if f.startswith(("http://", "https://")):
            file_obj = _urlopen(f)
            try:
                yield file_obj
            finally:
                # Close the HTTP response even when the caller's block raises;
                # the previous code only closed it on the success path and
                # leaked the connection on exceptions.
                file_obj.close()
        else:
            with open(f, mode) as file_obj:
                yield file_obj
    else:
        # Already a file-like object; caller retains ownership, do not close.
        yield f
def _file_exists(path):
    """Return True if *path* (a local path or an HTTP(S) URL) exists."""
    if not path.startswith(("http://", "https://")):
        # Local filesystem path.
        return os.path.exists(path)
    # Remote URL: probe it; a 4xx/5xx response means "does not exist".
    try:
        file_obj = _urlopen(path)
        file_obj.close()
    except six.moves.urllib.error.HTTPError:
        return False
    return True
class MetadataBase(object):
    """Common base class for productmd metadata objects.

    Provides field-validation helpers (``_assert_*``), automatic validation
    via ``validate()``, and JSON load/dump plumbing built around a "parser"
    dict. Subclasses implement :meth:`serialize` and :meth:`deserialize`.
    """

    def _assert_type(self, field, expected_types):
        # Raise TypeError unless getattr(self, field) is an instance of at
        # least one type in expected_types.
        value = getattr(self, field)
        for atype in expected_types:
            if isinstance(value, atype):
                return
        raise TypeError("%s: Field '%s' has invalid type: %s" % (self.__class__.__name__, field, type(value)))

    def _assert_value(self, field, expected_values):
        # Raise ValueError unless the field's value is one of expected_values.
        value = getattr(self, field)
        if value not in expected_values:
            raise ValueError("%s: Field '%s' has invalid value: %s" % (self.__class__.__name__, field, value))

    def _assert_not_blank(self, field):
        # Raise ValueError when the field's value is falsy (None, "", 0, ...).
        value = getattr(self, field)
        if not value:
            raise ValueError("%s: Field '%s' must not be blank" % (self.__class__.__name__, field))

    def _assert_matches_re(self, field, expected_patterns):
        """
        Raise ValueError unless the field's value matches at least one pattern.

        The list of patterns can contain either strings or compiled regular
        expressions.
        """
        value = getattr(self, field)
        for pattern in expected_patterns:
            try:
                if pattern.match(value):
                    return
            except AttributeError:
                # It's not a compiled regex, treat it as string.
                if re.match(pattern, value):
                    return
        raise ValueError("%s: Field '%s' has invalid value: %s. It does not match any provided REs: %s"
                         % (self.__class__.__name__, field, value, expected_patterns))

    def validate(self):
        """
        Validate attributes by running all self._validate_*() methods.

        Methods are discovered by name prefix and run in sorted name order.

        :raises TypeError: if an attribute has invalid type
        :raises ValueError: if an attribute contains invalid value
        """
        method_names = sorted([i for i in dir(self) if i.startswith("_validate") and callable(getattr(self, i))])
        for method_name in method_names:
            method = getattr(self, method_name)
            method()

    def _get_parser(self):
        # Fresh, empty "parser" container passed to serialize(); a plain dict.
        return {}

    def load(self, f):
        """
        Load data from a file.

        :param f: file-like object or path to file (local path or URL)
        :type f: file or str
        """
        with _open_file_obj(f) as f:
            parser = self.parse_file(f)
            self.deserialize(parser)

    def loads(self, s):
        """
        Load data from a string.

        :param s: input data
        :type s: str
        """
        io = six.StringIO()
        io.write(s)
        io.seek(0)
        self.load(io)
        self.validate()

    def dump(self, f):
        """
        Dump data to a file.

        :param f: file-like object or path to file
        :type f: file or str
        """
        self.validate()
        with _open_file_obj(f, "w") as f:
            parser = self._get_parser()
            self.serialize(parser)
            self.build_file(parser, f)

    def dumps(self):
        """
        Dump data to a string.

        :rtype: str
        """
        io = six.StringIO()
        self.dump(io)
        io.seek(0)
        return io.read()

    def parse_file(self, f):
        # parse file, return parser or dict with data
        # Rewind the stream first so repeated loads from one object work.
        if hasattr(f, "seekable"):
            if f.seekable():
                f.seek(0)
        elif hasattr(f, "seek"):
            f.seek(0)
        if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):
            # HTTPResponse needs special handling in py3: it yields bytes,
            # so wrap it in a utf-8 decoding reader before json.load.
            reader = codecs.getreader("utf-8")
            parser = json.load(reader(f))
        else:
            parser = json.load(f)
        return parser

    def build_file(self, parser, f):
        # build file from parser or dict with data (stable, pretty JSON)
        json.dump(parser, f, indent=4, sort_keys=True, separators = (",", ": "))

    def deserialize(self, parser):
        # copy data from parser to instance -- subclasses must override
        raise NotImplementedError

    def serialize(self, parser):
        # copy data from instance to parser -- subclasses must override
        raise NotImplementedError
class Header(MetadataBase):
    """
    This class represents the header used in serialized metadata files.

    It consists of a type and a version. The type is meant purely for consumers
    of the file to know what they are dealing with without having to check
    filename. The version is used by productmd when parsing the file.
    """

    def __init__(self, parent, metadata_type):
        # JSON section name this header (de)serializes under.
        self._section = "header"
        self.parent = parent
        # Placeholder until deserialize() or set_current_version() runs.
        self.version = "0.0"
        self.metadata_type = metadata_type

    def _validate_version(self):
        # Picked up automatically by MetadataBase.validate().
        self._assert_type("version", six.string_types)
        self._assert_matches_re("version", [r"^\d+\.\d+$"])

    @property
    def version_tuple(self):
        # e.g. "1.2" -> (1, 2); validates first so splitting is safe.
        self.validate()
        return tuple(split_version(self.version))

    def set_current_version(self):
        # Stamp the header with the library's own format version.
        self.version = ".".join([str(i) for i in VERSION])

    def serialize(self, parser):
        # write *current* version, because format gets converted on save
        self.set_current_version()
        self.validate()
        data = parser
        data[self._section] = {}
        data[self._section]["type"] = self.metadata_type
        data[self._section]["version"] = self.version

    def deserialize(self, parser):
        data = parser
        self.version = data[self._section]["version"]
        if self.version_tuple >= (1, 1):
            # The "type" key only exists in format >= 1.1; check it matches.
            metadata_type = data[self._section]["type"]
            if metadata_type != self.metadata_type:
                raise ValueError("Invalid metadata type '%s', expected '%s'" % (metadata_type, self.metadata_type))
        self.validate()
def split_version(version):
    """
    Split version to a list of integers
    that can be easily compared.

    :param version: Release version
    :type version: str
    :rtype: [int] or [string]
    """
    # A version starting with a non-digit is treated as an opaque label.
    if re.match(r"^[^0-9]", version):
        return [version]
    return [int(part) for part in version.split(".")]
def get_major_version(version, remove=None):
    """Return major version of a provided version string. Major version is the
    first component of the dot-separated version string. For non-version-like
    strings this function returns the argument unchanged.

    The ``remove`` parameter is deprecated since version 1.18 and will be
    removed in the future.

    :param version: Version string
    :type version: str
    :rtype: str
    """
    if remove:
        warnings.warn("remove argument is deprecated", DeprecationWarning)
    # Only the first component is needed, so split at most once.
    return version.split(".", 1)[0]
def get_minor_version(version, remove=None):
    """Return minor version of a provided version string. Minor version is the
    second component in the dot-separated version string. For non-version-like
    strings this function returns ``None``.

    The ``remove`` parameter is deprecated since version 1.18 and will be
    removed in the future.

    :param version: Version string
    :type version: str
    :rtype: str
    """
    if remove:
        warnings.warn("remove argument is deprecated", DeprecationWarning)
    # Assume MAJOR.MINOR.REST...; without a second component there is no minor.
    parts = version.split(".")
    if len(parts) < 2:
        return None
    return parts[1]
def parse_release_id(release_id):
    """
    Parse release_id to parts:
        {short, version, type}
    or
        {short, version, type, bp_short, bp_version, bp_type}

    :param release_id: Release ID string
    :type release_id: str
    :rtype: dict
    """
    # "release@base_product" carries an optional base-product segment.
    base_product = None
    release = release_id
    if "@" in release_id:
        release, base_product = release_id.split("@")
    result = _parse_release_id_part(release)
    if base_product is not None:
        # Base-product keys get a "bp_" prefix to avoid clashing.
        result.update(_parse_release_id_part(base_product, prefix="bp_"))
    return result
def _parse_release_id_part(release_id, prefix=""):
    """Parse one "<short>-<version>[-<type>]" release-id segment.

    Returns a dict with "short", "version" and "type" keys, each key name
    prepended with *prefix* (used to build the "bp_*" base-product keys).

    NOTE(review): relies on RELEASE_TYPES to recognize type suffixes that
    themselves contain dashes; an unknown dashed type would be mis-split.
    """
    if release_id.count("-") == 1:
        # TODO: what if short contains '-'?
        # Exactly one dash: no explicit type, default to "ga".
        short, version = release_id.split("-")
        release_type = "ga"
    else:
        release_type = None
        for type_ in RELEASE_TYPES:
            # Try to find a known release type.
            if release_id.endswith(type_):
                release_type = type_
                break
        if release_type:
            # Found, remove it from the parsed string (because there could be a
            # dash causing problems).
            release_id = release_id[:-len(release_type)]
        # Split off the last two dash-separated fields; when a known type was
        # stripped above, the third field is empty and ignored below.
        short, version, release_type_extracted = release_id.rsplit("-", 2)
        # If known release type is found, use it; otherwise fall back to the
        # one we parsed out.
        release_type = release_type or release_type_extracted
    result = {
        "short": short,
        "version": version,
        "type": release_type,
    }
    # Apply the key prefix (empty for the main release, "bp_" for base product).
    result = dict([("%s%s" % (prefix, key), value) for key, value in result.items()])
    return result
class SortedDict(dict):
    """A dict whose iteration methods always yield keys in sorted order."""

    def keys(self):
        # Sorted snapshot of the keys; every other method builds on this.
        return sorted(dict.keys(self))

    def __iter__(self):
        return iter(self.keys())

    def iterkeys(self):
        return iter(self.keys())

    def itervalues(self):
        return (self[key] for key in self.keys())

    def iteritems(self):
        return ((key, self[key]) for key in self.keys())

    def items(self):
        return self.iteritems()
class SortedConfigParser(ConfigParser):
    """ConfigParser that keeps sections/options sorted and preserves option case."""

    def __init__(self, *args, **kwargs):
        if sys.version_info[0] == 2:
            if sys.version_info[:2] >= (2, 6):
                # SafeConfigParser(dict_type=) supported in 2.6+
                kwargs["dict_type"] = SortedDict
            ConfigParser.__init__(self, *args, **kwargs)
        else:
            # py3: always pass the sorted backing dict.
            kwargs["dict_type"] = SortedDict
            super(SortedConfigParser, self).__init__(*args, **kwargs)
        self.seen = set()

    def optionxform(self, optionstr):
        # don't convert options to lower()
        return optionstr

    def option_lookup(self, section_option_list, default=None):
        # Return the value of the first (section, option) pair that exists,
        # or *default* when none of them do.
        for section, option in section_option_list:
            if self.has_option(section, option):
                return self.get(section, option)
        return default

    def read_file(self, *args, **kwargs):
        # py2's ConfigParser only has readfp(); alias it for a uniform API.
        if sys.version_info[0] == 2:
            return self.readfp(*args, **kwargs)
        return super(SortedConfigParser, self).read_file(*args, **kwargs)
|
release-engineering/productmd | productmd/common.py | parse_release_id | python | def parse_release_id(release_id):
if "@" in release_id:
release, base_product = release_id.split("@")
else:
release = release_id
base_product = None
result = _parse_release_id_part(release)
if base_product is not None:
result.update(_parse_release_id_part(base_product, prefix="bp_"))
return result | Parse release_id to parts:
{short, version, type}
or
{short, version, type, bp_short, bp_version, bp_type}
:param release_id: Release ID string
:type release_id: str
:rtype: dict | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L465-L485 | [
"def _parse_release_id_part(release_id, prefix=\"\"):\n if release_id.count(\"-\") == 1:\n # TODO: what if short contains '-'?\n short, version = release_id.split(\"-\")\n release_type = \"ga\"\n else:\n release_type = None\n for type_ in RELEASE_TYPES:\n # Try to... | # -*- coding: utf-8 -*-
# pylint: disable=super-on-old-class
# Copyright (C) 2015 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
This module provides base classes and common functions
used in other productmd modules.
"""
import os
import sys
import re
import json
import codecs
import contextlib
import ssl
import warnings
import six
from six.moves.configparser import ConfigParser
VERSION = (1, 2)
__all__ = (
"MetadataBase",
"Header",
"VERSION",
"RELEASE_SHORT_RE",
"RELEASE_VERSION_RE",
"RELEASE_TYPE_RE",
"RELEASE_TYPES",
"parse_nvra",
"is_valid_release_short",
"is_valid_release_version",
"is_valid_release_type",
"split_version",
"get_major_version",
"get_minor_version",
"create_release_id",
"parse_release_id",
)
# HACK: dumped from rpmUtils.arch which is not available on python3
# one less dependency at least :)
RPM_ARCHES = [
"aarch64", "alpha", "alphaev4", "alphaev45", "alphaev5", "alphaev56", "alphaev6", "alphaev67", "alphaev68",
"alphaev7", "alphapca56", "amd64", "arm64", "armhfp", "armv5tejl", "armv5tel", "armv6hl", "armv6l", "armv7hl",
"armv7hnl", "armv7l", "athlon", "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "ppc", "ppc64",
"ppc64iseries", "ppc64le", "ppc64p7", "ppc64pseries", "s390", "s390x", "sh3", "sh4", "sh4a",
"sparc", "sparc64", "sparc64v", "sparcv8", "sparcv9", "sparcv9v", "x86_64",
"src", "nosrc", "noarch",
]
#: Pattern to parse RPM N-E:V-R.A
RPM_NVRA_RE = re.compile(r"^(.*/)?(?P<name>.*)-((?P<epoch>\d+):)?(?P<version>.*)-(?P<release>.*)\.(?P<arch>.*)$")
def parse_nvra(nvra):
"""
Parse RPM N-E:V-R.A string to a dict.
:param nvra: N-E:V-R.A string. This can be a file name or a file path including the '.rpm' suffix.
:type nvra: str
:rtype: dict, with "name", "epoch", "version", "release", and "arch" elements.
"""
if nvra.endswith(".rpm"):
nvra = nvra[:-4]
result = RPM_NVRA_RE.match(nvra).groupdict()
result["epoch"] = result["epoch"] or 0
result["epoch"] = int(result["epoch"])
return result
#: Validation regex for release short name: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_SHORT_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Validation regex for release version: any string or [0-9] separated with dots.
RELEASE_VERSION_RE = re.compile(r"^([^0-9].*|([0-9]+(\.?[0-9]+)*))$")
#: Validation regex for release type: [a-z] followed by [a-z0-9] separated with dashes.
RELEASE_TYPE_RE = re.compile(r"^[a-z]+([a-z0-9]*-?[a-z0-9]+)*$")
#: Known release types. New values need to be added here if they contain a
# dash, otherwise parsing release IDs will not be reliable.
RELEASE_TYPES = [
"fast",
"ga",
"updates",
"updates-testing",
"eus",
"aus",
"els",
"tus",
"e4s",
]
def is_valid_release_short(short):
"""
Determine if given release short name is valid.
:param short: Release short name
:type short: str
:rtype: bool
"""
match = RELEASE_SHORT_RE.match(short)
return match is not None
def is_valid_release_version(version):
"""
Determine if given release version is valid.
:param version: Release version
:type version: str
:rtype: bool
"""
match = RELEASE_VERSION_RE.match(version)
return match is not None
def is_valid_release_type(release_type):
"""
Determine if given release type is valid.
:param release_type: Release type
:type release_type: str
:rtype: bool
"""
match = RELEASE_TYPE_RE.match(release_type)
return match is not None
def _urlopen(path):
kwargs = {}
if hasattr(ssl, '_create_unverified_context'):
# We only want to use the `context` keyword argument if it has a value.
# Older Python versions (<2.7.9) do not support it. In those cases the
# ssl module will not have the method to create the context.
kwargs['context'] = ssl._create_unverified_context()
return six.moves.urllib.request.urlopen(path, **kwargs)
@contextlib.contextmanager
def _open_file_obj(f, mode="r"):
"""
A context manager that provides access to a file.
:param f: the file to be opened
:type f: a file-like object or path to file
:param mode: how to open the file
:type mode: string
"""
if isinstance(f, six.string_types):
if f.startswith(("http://", "https://")):
file_obj = _urlopen(f)
yield file_obj
file_obj.close()
else:
with open(f, mode) as file_obj:
yield file_obj
else:
yield f
def _file_exists(path):
if path.startswith(("http://", "https://")):
try:
file_obj = _urlopen(path)
file_obj.close()
except six.moves.urllib.error.HTTPError:
return False
return True
return os.path.exists(path)
class MetadataBase(object):
def _assert_type(self, field, expected_types):
value = getattr(self, field)
for atype in expected_types:
if isinstance(value, atype):
return
raise TypeError("%s: Field '%s' has invalid type: %s" % (self.__class__.__name__, field, type(value)))
def _assert_value(self, field, expected_values):
value = getattr(self, field)
if value not in expected_values:
raise ValueError("%s: Field '%s' has invalid value: %s" % (self.__class__.__name__, field, value))
def _assert_not_blank(self, field):
value = getattr(self, field)
if not value:
raise ValueError("%s: Field '%s' must not be blank" % (self.__class__.__name__, field))
def _assert_matches_re(self, field, expected_patterns):
"""
The list of patterns can contain either strings or compiled regular
expressions.
"""
value = getattr(self, field)
for pattern in expected_patterns:
try:
if pattern.match(value):
return
except AttributeError:
# It's not a compiled regex, treat it as string.
if re.match(pattern, value):
return
raise ValueError("%s: Field '%s' has invalid value: %s. It does not match any provided REs: %s"
% (self.__class__.__name__, field, value, expected_patterns))
def validate(self):
"""
Validate attributes by running all self._validate_*() methods.
:raises TypeError: if an attribute has invalid type
:raises ValueError: if an attribute contains invalid value
"""
method_names = sorted([i for i in dir(self) if i.startswith("_validate") and callable(getattr(self, i))])
for method_name in method_names:
method = getattr(self, method_name)
method()
def _get_parser(self):
return {}
def load(self, f):
"""
Load data from a file.
:param f: file-like object or path to file
:type f: file or str
"""
with _open_file_obj(f) as f:
parser = self.parse_file(f)
self.deserialize(parser)
def loads(self, s):
"""
Load data from a string.
:param s: input data
:type s: str
"""
io = six.StringIO()
io.write(s)
io.seek(0)
self.load(io)
self.validate()
def dump(self, f):
"""
Dump data to a file.
:param f: file-like object or path to file
:type f: file or str
"""
self.validate()
with _open_file_obj(f, "w") as f:
parser = self._get_parser()
self.serialize(parser)
self.build_file(parser, f)
def dumps(self):
"""
Dump data to a string.
:rtype: str
"""
io = six.StringIO()
self.dump(io)
io.seek(0)
return io.read()
def parse_file(self, f):
# parse file, return parser or dict with data
if hasattr(f, "seekable"):
if f.seekable():
f.seek(0)
elif hasattr(f, "seek"):
f.seek(0)
if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):
# HTTPResponse needs special handling in py3
reader = codecs.getreader("utf-8")
parser = json.load(reader(f))
else:
parser = json.load(f)
return parser
def build_file(self, parser, f):
# build file from parser or dict with data
json.dump(parser, f, indent=4, sort_keys=True, separators = (",", ": "))
def deserialize(self, parser):
# copy data from parser to instance
raise NotImplementedError
def serialize(self, parser):
# copy data from instance to parser
raise NotImplementedError
class Header(MetadataBase):
"""
This class represents the header used in serialized metadata files.
It consists of a type and a version. The type is meant purely for consumers
of the file to know what they are dealing with without having to check
filename. The version is used by productmd when parsing the file.
"""
def __init__(self, parent, metadata_type):
self._section = "header"
self.parent = parent
self.version = "0.0"
self.metadata_type = metadata_type
def _validate_version(self):
self._assert_type("version", six.string_types)
self._assert_matches_re("version", [r"^\d+\.\d+$"])
@property
def version_tuple(self):
self.validate()
return tuple(split_version(self.version))
def set_current_version(self):
self.version = ".".join([str(i) for i in VERSION])
def serialize(self, parser):
# write *current* version, because format gets converted on save
self.set_current_version()
self.validate()
data = parser
data[self._section] = {}
data[self._section]["type"] = self.metadata_type
data[self._section]["version"] = self.version
def deserialize(self, parser):
data = parser
self.version = data[self._section]["version"]
if self.version_tuple >= (1, 1):
metadata_type = data[self._section]["type"]
if metadata_type != self.metadata_type:
raise ValueError("Invalid metadata type '%s', expected '%s'" % (metadata_type, self.metadata_type))
self.validate()
def split_version(version):
"""
Split version to a list of integers
that can be easily compared.
:param version: Release version
:type version: str
:rtype: [int] or [string]
"""
if re.match("^[^0-9].*", version):
return [version]
return [int(i) for i in version.split(".")]
def get_major_version(version, remove=None):
"""Return major version of a provided version string. Major version is the
first component of the dot-separated version string. For non-version-like
strings this function returns the argument unchanged.
The ``remove`` parameter is deprecated since version 1.18 and will be
removed in the future.
:param version: Version string
:type version: str
:rtype: str
"""
if remove:
warnings.warn("remove argument is deprecated", DeprecationWarning)
version_split = version.split(".")
return version_split[0]
def get_minor_version(version, remove=None):
"""Return minor version of a provided version string. Minor version is the
second component in the dot-separated version string. For non-version-like
strings this function returns ``None``.
The ``remove`` parameter is deprecated since version 1.18 and will be
removed in the future.
:param version: Version string
:type version: str
:rtype: str
"""
if remove:
warnings.warn("remove argument is deprecated", DeprecationWarning)
version_split = version.split(".")
try:
# Assume MAJOR.MINOR.REST...
return version_split[1]
except IndexError:
return None
def create_release_id(short, version, type, bp_short=None, bp_version=None, bp_type=None):
"""
Create release_id from given parts.
:param short: Release short name
:type short: str
:param version: Release version
:type version: str
:param version: Release type
:type version: str
:param bp_short: Base Product short name
:type bp_short: str
:param bp_version: Base Product version
:type bp_version: str
:param bp_type: Base Product type
:rtype: str
"""
if not is_valid_release_short(short):
raise ValueError("Release short name is not valid: %s" % short)
if not is_valid_release_version(version):
raise ValueError("Release short version is not valid: %s" % version)
if not is_valid_release_type(type):
raise ValueError("Release type is not valid: %s" % type)
if type == "ga":
result = "%s-%s" % (short, version)
else:
result = "%s-%s-%s" % (short, version, type)
if bp_short:
result += "@%s" % create_release_id(bp_short, bp_version, bp_type)
return result
def _parse_release_id_part(release_id, prefix=""):
if release_id.count("-") == 1:
# TODO: what if short contains '-'?
short, version = release_id.split("-")
release_type = "ga"
else:
release_type = None
for type_ in RELEASE_TYPES:
# Try to find a known release type.
if release_id.endswith(type_):
release_type = type_
break
if release_type:
# Found, remove it from the parsed string (because there could be a
# dash causing problems).
release_id = release_id[:-len(release_type)]
short, version, release_type_extracted = release_id.rsplit("-", 2)
# If known release type is found, use it; otherwise fall back to the
# one we parsed out.
release_type = release_type or release_type_extracted
result = {
"short": short,
"version": version,
"type": release_type,
}
result = dict([("%s%s" % (prefix, key), value) for key, value in result.items()])
return result
class SortedDict(dict):
def __iter__(self):
for key in self.keys():
yield key
def iterkeys(self):
for key in self.keys():
yield key
def itervalues(self):
for key in self.keys():
yield self[key]
def keys(self):
return sorted(dict.keys(self), reverse=False)
def iteritems(self):
for key in self.keys():
yield (key, self[key])
def items(self):
return self.iteritems()
class SortedConfigParser(ConfigParser):
def __init__(self, *args, **kwargs):
if sys.version_info[0] == 2:
if sys.version_info[:2] >= (2, 6):
# SafeConfigParser(dict_type=) supported in 2.6+
kwargs["dict_type"] = SortedDict
ConfigParser.__init__(self, *args, **kwargs)
else:
kwargs["dict_type"] = SortedDict
super(SortedConfigParser, self).__init__(*args, **kwargs)
self.seen = set()
def optionxform(self, optionstr):
# don't convert options to lower()
return optionstr
def option_lookup(self, section_option_list, default=None):
for section, option in section_option_list:
if self.has_option(section, option):
return self.get(section, option)
return default
def read_file(self, *args, **kwargs):
if sys.version_info[0] == 2:
return self.readfp(*args, **kwargs)
return super(SortedConfigParser, self).read_file(*args, **kwargs)
|
release-engineering/productmd | productmd/common.py | MetadataBase._assert_matches_re | python | def _assert_matches_re(self, field, expected_patterns):
value = getattr(self, field)
for pattern in expected_patterns:
try:
if pattern.match(value):
return
except AttributeError:
# It's not a compiled regex, treat it as string.
if re.match(pattern, value):
return
raise ValueError("%s: Field '%s' has invalid value: %s. It does not match any provided REs: %s"
% (self.__class__.__name__, field, value, expected_patterns)) | The list of patterns can contain either strings or compiled regular
expressions. | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L222-L237 | null | class MetadataBase(object):
def _assert_type(self, field, expected_types):
value = getattr(self, field)
for atype in expected_types:
if isinstance(value, atype):
return
raise TypeError("%s: Field '%s' has invalid type: %s" % (self.__class__.__name__, field, type(value)))
def _assert_value(self, field, expected_values):
value = getattr(self, field)
if value not in expected_values:
raise ValueError("%s: Field '%s' has invalid value: %s" % (self.__class__.__name__, field, value))
def _assert_not_blank(self, field):
value = getattr(self, field)
if not value:
raise ValueError("%s: Field '%s' must not be blank" % (self.__class__.__name__, field))
def validate(self):
"""
Validate attributes by running all self._validate_*() methods.
:raises TypeError: if an attribute has invalid type
:raises ValueError: if an attribute contains invalid value
"""
method_names = sorted([i for i in dir(self) if i.startswith("_validate") and callable(getattr(self, i))])
for method_name in method_names:
method = getattr(self, method_name)
method()
def _get_parser(self):
return {}
def load(self, f):
"""
Load data from a file.
:param f: file-like object or path to file
:type f: file or str
"""
with _open_file_obj(f) as f:
parser = self.parse_file(f)
self.deserialize(parser)
def loads(self, s):
"""
Load data from a string.
:param s: input data
:type s: str
"""
io = six.StringIO()
io.write(s)
io.seek(0)
self.load(io)
self.validate()
def dump(self, f):
"""
Dump data to a file.
:param f: file-like object or path to file
:type f: file or str
"""
self.validate()
with _open_file_obj(f, "w") as f:
parser = self._get_parser()
self.serialize(parser)
self.build_file(parser, f)
def dumps(self):
"""
Dump data to a string.
:rtype: str
"""
io = six.StringIO()
self.dump(io)
io.seek(0)
return io.read()
def parse_file(self, f):
# parse file, return parser or dict with data
if hasattr(f, "seekable"):
if f.seekable():
f.seek(0)
elif hasattr(f, "seek"):
f.seek(0)
if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):
# HTTPResponse needs special handling in py3
reader = codecs.getreader("utf-8")
parser = json.load(reader(f))
else:
parser = json.load(f)
return parser
def build_file(self, parser, f):
# build file from parser or dict with data
json.dump(parser, f, indent=4, sort_keys=True, separators = (",", ": "))
def deserialize(self, parser):
# copy data from parser to instance
raise NotImplementedError
def serialize(self, parser):
# copy data from instance to parser
raise NotImplementedError
|
release-engineering/productmd | productmd/common.py | MetadataBase.validate | python | def validate(self):
method_names = sorted([i for i in dir(self) if i.startswith("_validate") and callable(getattr(self, i))])
for method_name in method_names:
method = getattr(self, method_name)
method() | Validate attributes by running all self._validate_*() methods.
:raises TypeError: if an attribute has invalid type
:raises ValueError: if an attribute contains invalid value | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L239-L249 | null | class MetadataBase(object):
def _assert_type(self, field, expected_types):
value = getattr(self, field)
for atype in expected_types:
if isinstance(value, atype):
return
raise TypeError("%s: Field '%s' has invalid type: %s" % (self.__class__.__name__, field, type(value)))
def _assert_value(self, field, expected_values):
value = getattr(self, field)
if value not in expected_values:
raise ValueError("%s: Field '%s' has invalid value: %s" % (self.__class__.__name__, field, value))
def _assert_not_blank(self, field):
value = getattr(self, field)
if not value:
raise ValueError("%s: Field '%s' must not be blank" % (self.__class__.__name__, field))
def _assert_matches_re(self, field, expected_patterns):
"""
The list of patterns can contain either strings or compiled regular
expressions.
"""
value = getattr(self, field)
for pattern in expected_patterns:
try:
if pattern.match(value):
return
except AttributeError:
# It's not a compiled regex, treat it as string.
if re.match(pattern, value):
return
raise ValueError("%s: Field '%s' has invalid value: %s. It does not match any provided REs: %s"
% (self.__class__.__name__, field, value, expected_patterns))
def _get_parser(self):
return {}
def load(self, f):
"""
Load data from a file.
:param f: file-like object or path to file
:type f: file or str
"""
with _open_file_obj(f) as f:
parser = self.parse_file(f)
self.deserialize(parser)
def loads(self, s):
"""
Load data from a string.
:param s: input data
:type s: str
"""
io = six.StringIO()
io.write(s)
io.seek(0)
self.load(io)
self.validate()
def dump(self, f):
"""
Dump data to a file.
:param f: file-like object or path to file
:type f: file or str
"""
self.validate()
with _open_file_obj(f, "w") as f:
parser = self._get_parser()
self.serialize(parser)
self.build_file(parser, f)
def dumps(self):
"""
Dump data to a string.
:rtype: str
"""
io = six.StringIO()
self.dump(io)
io.seek(0)
return io.read()
def parse_file(self, f):
# parse file, return parser or dict with data
if hasattr(f, "seekable"):
if f.seekable():
f.seek(0)
elif hasattr(f, "seek"):
f.seek(0)
if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):
# HTTPResponse needs special handling in py3
reader = codecs.getreader("utf-8")
parser = json.load(reader(f))
else:
parser = json.load(f)
return parser
def build_file(self, parser, f):
# build file from parser or dict with data
json.dump(parser, f, indent=4, sort_keys=True, separators = (",", ": "))
def deserialize(self, parser):
# copy data from parser to instance
raise NotImplementedError
def serialize(self, parser):
# copy data from instance to parser
raise NotImplementedError
|
release-engineering/productmd | productmd/common.py | MetadataBase.load | python | def load(self, f):
with _open_file_obj(f) as f:
parser = self.parse_file(f)
self.deserialize(parser) | Load data from a file.
:param f: file-like object or path to file
:type f: file or str | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L254-L263 | [
"def parse_file(self, f):\n # parse file, return parser or dict with data\n if hasattr(f, \"seekable\"):\n if f.seekable():\n f.seek(0)\n elif hasattr(f, \"seek\"):\n f.seek(0)\n if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):\n # HTTPResponse needs spec... | class MetadataBase(object):
def _assert_type(self, field, expected_types):
value = getattr(self, field)
for atype in expected_types:
if isinstance(value, atype):
return
raise TypeError("%s: Field '%s' has invalid type: %s" % (self.__class__.__name__, field, type(value)))
def _assert_value(self, field, expected_values):
value = getattr(self, field)
if value not in expected_values:
raise ValueError("%s: Field '%s' has invalid value: %s" % (self.__class__.__name__, field, value))
def _assert_not_blank(self, field):
value = getattr(self, field)
if not value:
raise ValueError("%s: Field '%s' must not be blank" % (self.__class__.__name__, field))
def _assert_matches_re(self, field, expected_patterns):
"""
The list of patterns can contain either strings or compiled regular
expressions.
"""
value = getattr(self, field)
for pattern in expected_patterns:
try:
if pattern.match(value):
return
except AttributeError:
# It's not a compiled regex, treat it as string.
if re.match(pattern, value):
return
raise ValueError("%s: Field '%s' has invalid value: %s. It does not match any provided REs: %s"
% (self.__class__.__name__, field, value, expected_patterns))
def validate(self):
"""
Validate attributes by running all self._validate_*() methods.
:raises TypeError: if an attribute has invalid type
:raises ValueError: if an attribute contains invalid value
"""
method_names = sorted([i for i in dir(self) if i.startswith("_validate") and callable(getattr(self, i))])
for method_name in method_names:
method = getattr(self, method_name)
method()
def _get_parser(self):
return {}
def loads(self, s):
"""
Load data from a string.
:param s: input data
:type s: str
"""
io = six.StringIO()
io.write(s)
io.seek(0)
self.load(io)
self.validate()
def dump(self, f):
"""
Dump data to a file.
:param f: file-like object or path to file
:type f: file or str
"""
self.validate()
with _open_file_obj(f, "w") as f:
parser = self._get_parser()
self.serialize(parser)
self.build_file(parser, f)
def dumps(self):
"""
Dump data to a string.
:rtype: str
"""
io = six.StringIO()
self.dump(io)
io.seek(0)
return io.read()
def parse_file(self, f):
# parse file, return parser or dict with data
if hasattr(f, "seekable"):
if f.seekable():
f.seek(0)
elif hasattr(f, "seek"):
f.seek(0)
if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):
# HTTPResponse needs special handling in py3
reader = codecs.getreader("utf-8")
parser = json.load(reader(f))
else:
parser = json.load(f)
return parser
def build_file(self, parser, f):
# build file from parser or dict with data
json.dump(parser, f, indent=4, sort_keys=True, separators = (",", ": "))
def deserialize(self, parser):
# copy data from parser to instance
raise NotImplementedError
def serialize(self, parser):
# copy data from instance to parser
raise NotImplementedError
|
release-engineering/productmd | productmd/common.py | MetadataBase.loads | python | def loads(self, s):
io = six.StringIO()
io.write(s)
io.seek(0)
self.load(io)
self.validate() | Load data from a string.
:param s: input data
:type s: str | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L265-L276 | [
"def validate(self):\n \"\"\"\n Validate attributes by running all self._validate_*() methods.\n\n :raises TypeError: if an attribute has invalid type\n :raises ValueError: if an attribute contains invalid value\n \"\"\"\n method_names = sorted([i for i in dir(self) if i.startswith(\"_validate\") ... | class MetadataBase(object):
def _assert_type(self, field, expected_types):
value = getattr(self, field)
for atype in expected_types:
if isinstance(value, atype):
return
raise TypeError("%s: Field '%s' has invalid type: %s" % (self.__class__.__name__, field, type(value)))
def _assert_value(self, field, expected_values):
value = getattr(self, field)
if value not in expected_values:
raise ValueError("%s: Field '%s' has invalid value: %s" % (self.__class__.__name__, field, value))
def _assert_not_blank(self, field):
value = getattr(self, field)
if not value:
raise ValueError("%s: Field '%s' must not be blank" % (self.__class__.__name__, field))
def _assert_matches_re(self, field, expected_patterns):
"""
The list of patterns can contain either strings or compiled regular
expressions.
"""
value = getattr(self, field)
for pattern in expected_patterns:
try:
if pattern.match(value):
return
except AttributeError:
# It's not a compiled regex, treat it as string.
if re.match(pattern, value):
return
raise ValueError("%s: Field '%s' has invalid value: %s. It does not match any provided REs: %s"
% (self.__class__.__name__, field, value, expected_patterns))
def validate(self):
"""
Validate attributes by running all self._validate_*() methods.
:raises TypeError: if an attribute has invalid type
:raises ValueError: if an attribute contains invalid value
"""
method_names = sorted([i for i in dir(self) if i.startswith("_validate") and callable(getattr(self, i))])
for method_name in method_names:
method = getattr(self, method_name)
method()
def _get_parser(self):
return {}
def load(self, f):
"""
Load data from a file.
:param f: file-like object or path to file
:type f: file or str
"""
with _open_file_obj(f) as f:
parser = self.parse_file(f)
self.deserialize(parser)
def dump(self, f):
"""
Dump data to a file.
:param f: file-like object or path to file
:type f: file or str
"""
self.validate()
with _open_file_obj(f, "w") as f:
parser = self._get_parser()
self.serialize(parser)
self.build_file(parser, f)
def dumps(self):
"""
Dump data to a string.
:rtype: str
"""
io = six.StringIO()
self.dump(io)
io.seek(0)
return io.read()
def parse_file(self, f):
# parse file, return parser or dict with data
if hasattr(f, "seekable"):
if f.seekable():
f.seek(0)
elif hasattr(f, "seek"):
f.seek(0)
if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):
# HTTPResponse needs special handling in py3
reader = codecs.getreader("utf-8")
parser = json.load(reader(f))
else:
parser = json.load(f)
return parser
def build_file(self, parser, f):
# build file from parser or dict with data
json.dump(parser, f, indent=4, sort_keys=True, separators = (",", ": "))
def deserialize(self, parser):
# copy data from parser to instance
raise NotImplementedError
def serialize(self, parser):
# copy data from instance to parser
raise NotImplementedError
|
release-engineering/productmd | productmd/common.py | MetadataBase.dump | python | def dump(self, f):
self.validate()
with _open_file_obj(f, "w") as f:
parser = self._get_parser()
self.serialize(parser)
self.build_file(parser, f) | Dump data to a file.
:param f: file-like object or path to file
:type f: file or str | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L278-L289 | [
"def validate(self):\n \"\"\"\n Validate attributes by running all self._validate_*() methods.\n\n :raises TypeError: if an attribute has invalid type\n :raises ValueError: if an attribute contains invalid value\n \"\"\"\n method_names = sorted([i for i in dir(self) if i.startswith(\"_validate\") ... | class MetadataBase(object):
def _assert_type(self, field, expected_types):
value = getattr(self, field)
for atype in expected_types:
if isinstance(value, atype):
return
raise TypeError("%s: Field '%s' has invalid type: %s" % (self.__class__.__name__, field, type(value)))
def _assert_value(self, field, expected_values):
value = getattr(self, field)
if value not in expected_values:
raise ValueError("%s: Field '%s' has invalid value: %s" % (self.__class__.__name__, field, value))
def _assert_not_blank(self, field):
value = getattr(self, field)
if not value:
raise ValueError("%s: Field '%s' must not be blank" % (self.__class__.__name__, field))
def _assert_matches_re(self, field, expected_patterns):
"""
The list of patterns can contain either strings or compiled regular
expressions.
"""
value = getattr(self, field)
for pattern in expected_patterns:
try:
if pattern.match(value):
return
except AttributeError:
# It's not a compiled regex, treat it as string.
if re.match(pattern, value):
return
raise ValueError("%s: Field '%s' has invalid value: %s. It does not match any provided REs: %s"
% (self.__class__.__name__, field, value, expected_patterns))
def validate(self):
"""
Validate attributes by running all self._validate_*() methods.
:raises TypeError: if an attribute has invalid type
:raises ValueError: if an attribute contains invalid value
"""
method_names = sorted([i for i in dir(self) if i.startswith("_validate") and callable(getattr(self, i))])
for method_name in method_names:
method = getattr(self, method_name)
method()
def _get_parser(self):
return {}
def load(self, f):
"""
Load data from a file.
:param f: file-like object or path to file
:type f: file or str
"""
with _open_file_obj(f) as f:
parser = self.parse_file(f)
self.deserialize(parser)
def loads(self, s):
"""
Load data from a string.
:param s: input data
:type s: str
"""
io = six.StringIO()
io.write(s)
io.seek(0)
self.load(io)
self.validate()
def dumps(self):
"""
Dump data to a string.
:rtype: str
"""
io = six.StringIO()
self.dump(io)
io.seek(0)
return io.read()
def parse_file(self, f):
# parse file, return parser or dict with data
if hasattr(f, "seekable"):
if f.seekable():
f.seek(0)
elif hasattr(f, "seek"):
f.seek(0)
if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):
# HTTPResponse needs special handling in py3
reader = codecs.getreader("utf-8")
parser = json.load(reader(f))
else:
parser = json.load(f)
return parser
def build_file(self, parser, f):
# build file from parser or dict with data
json.dump(parser, f, indent=4, sort_keys=True, separators = (",", ": "))
def deserialize(self, parser):
# copy data from parser to instance
raise NotImplementedError
def serialize(self, parser):
# copy data from instance to parser
raise NotImplementedError
|
release-engineering/productmd | productmd/common.py | MetadataBase.dumps | python | def dumps(self):
io = six.StringIO()
self.dump(io)
io.seek(0)
return io.read() | Dump data to a string.
:rtype: str | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L291-L300 | [
"def dump(self, f):\n \"\"\"\n Dump data to a file.\n\n :param f: file-like object or path to file\n :type f: file or str\n \"\"\"\n self.validate()\n with _open_file_obj(f, \"w\") as f:\n parser = self._get_parser()\n self.serialize(parser)\n self.build_file(parser, f)\n"
... | class MetadataBase(object):
def _assert_type(self, field, expected_types):
value = getattr(self, field)
for atype in expected_types:
if isinstance(value, atype):
return
raise TypeError("%s: Field '%s' has invalid type: %s" % (self.__class__.__name__, field, type(value)))
def _assert_value(self, field, expected_values):
value = getattr(self, field)
if value not in expected_values:
raise ValueError("%s: Field '%s' has invalid value: %s" % (self.__class__.__name__, field, value))
def _assert_not_blank(self, field):
value = getattr(self, field)
if not value:
raise ValueError("%s: Field '%s' must not be blank" % (self.__class__.__name__, field))
def _assert_matches_re(self, field, expected_patterns):
"""
The list of patterns can contain either strings or compiled regular
expressions.
"""
value = getattr(self, field)
for pattern in expected_patterns:
try:
if pattern.match(value):
return
except AttributeError:
# It's not a compiled regex, treat it as string.
if re.match(pattern, value):
return
raise ValueError("%s: Field '%s' has invalid value: %s. It does not match any provided REs: %s"
% (self.__class__.__name__, field, value, expected_patterns))
def validate(self):
"""
Validate attributes by running all self._validate_*() methods.
:raises TypeError: if an attribute has invalid type
:raises ValueError: if an attribute contains invalid value
"""
method_names = sorted([i for i in dir(self) if i.startswith("_validate") and callable(getattr(self, i))])
for method_name in method_names:
method = getattr(self, method_name)
method()
def _get_parser(self):
return {}
def load(self, f):
"""
Load data from a file.
:param f: file-like object or path to file
:type f: file or str
"""
with _open_file_obj(f) as f:
parser = self.parse_file(f)
self.deserialize(parser)
def loads(self, s):
"""
Load data from a string.
:param s: input data
:type s: str
"""
io = six.StringIO()
io.write(s)
io.seek(0)
self.load(io)
self.validate()
def dump(self, f):
"""
Dump data to a file.
:param f: file-like object or path to file
:type f: file or str
"""
self.validate()
with _open_file_obj(f, "w") as f:
parser = self._get_parser()
self.serialize(parser)
self.build_file(parser, f)
def parse_file(self, f):
# parse file, return parser or dict with data
if hasattr(f, "seekable"):
if f.seekable():
f.seek(0)
elif hasattr(f, "seek"):
f.seek(0)
if six.PY3 and isinstance(f, six.moves.http_client.HTTPResponse):
# HTTPResponse needs special handling in py3
reader = codecs.getreader("utf-8")
parser = json.load(reader(f))
else:
parser = json.load(f)
return parser
def build_file(self, parser, f):
# build file from parser or dict with data
json.dump(parser, f, indent=4, sort_keys=True, separators = (",", ": "))
def deserialize(self, parser):
# copy data from parser to instance
raise NotImplementedError
def serialize(self, parser):
# copy data from instance to parser
raise NotImplementedError
|
release-engineering/productmd | productmd/compose.py | Compose.info | python | def info(self):
if self._composeinfo is not None:
return self._composeinfo
paths = [
"metadata/composeinfo.json",
]
self._composeinfo = self._load_metadata(paths, productmd.composeinfo.ComposeInfo)
return self._composeinfo | (:class:`productmd.composeinfo.ComposeInfo`) -- Compose metadata | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/compose.py#L92-L101 | [
"def _load_metadata(self, paths, cls):\n path = self._find_metadata_file(paths)\n obj = cls()\n try:\n obj.load(path)\n except ValueError:\n raise RuntimeError('%s is not a valid JSON file.' % path)\n return obj\n"
] | class Compose(object):
"""
This class provides easy access to compose metadata.
:param compose_path: Path to a compose. HTTP(s) URL is also accepted.
:type compose_path: str
"""
def __init__(self, compose_path):
# example: MYPRODUCT-1.0-YYYYMMDD.0/metadata
self.compose_path = compose_path
# example: MYPRODUCT-1.0-YYYYMMDD.0/compose/metadata (preferred location)
path = os.path.join(compose_path, "compose")
if _file_exists(path):
self.compose_path = path
elif "://" not in compose_path and os.path.exists(compose_path):
# Scan all subdirs under compose_path for 'metadata'. Doesn't work over HTTP.
# example: MYPRODUCT-1.0-YYYYMMDD.0/1.0/metadata (legacy location)
for i in os.listdir(compose_path):
path = os.path.join(compose_path, i)
metadata_path = os.path.join(path, "metadata")
if _file_exists(metadata_path):
self.compose_path = path
break
self._composeinfo = None
self._images = None
self._rpms = None
self._modules = None
def _find_metadata_file(self, paths):
for i in paths:
path = os.path.join(self.compose_path, i)
if _file_exists(path):
return path
raise RuntimeError('Failed to load metadata from %s' % self.compose_path)
@property
@property
def images(self):
"""(:class:`productmd.images.Images`) -- Compose images metadata"""
if self._images is not None:
return self._images
paths = [
"metadata/images.json",
"metadata/image-manifest.json",
]
self._images = self._load_metadata(paths, productmd.images.Images)
return self._images
@property
def rpms(self):
"""(:class:`productmd.rpms.Rpms`) -- Compose RPMs metadata"""
if self._rpms is not None:
return self._rpms
paths = [
"metadata/rpms.json",
"metadata/rpm-manifest.json",
]
self._rpms = self._load_metadata(paths, productmd.rpms.Rpms)
return self._rpms
@property
def modules(self):
"""(:class:`productmd.modules.Modules`) -- Compose Modules metadata"""
if self._modules is not None:
return self._modules
paths = [
"metadata/modules.json",
]
self._modules = self._load_metadata(paths, productmd.modules.Modules)
return self._modules
def _load_metadata(self, paths, cls):
path = self._find_metadata_file(paths)
obj = cls()
try:
obj.load(path)
except ValueError:
raise RuntimeError('%s is not a valid JSON file.' % path)
return obj
|
release-engineering/productmd | productmd/compose.py | Compose.images | python | def images(self):
if self._images is not None:
return self._images
paths = [
"metadata/images.json",
"metadata/image-manifest.json",
]
self._images = self._load_metadata(paths, productmd.images.Images)
return self._images | (:class:`productmd.images.Images`) -- Compose images metadata | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/compose.py#L104-L114 | [
"def _load_metadata(self, paths, cls):\n path = self._find_metadata_file(paths)\n obj = cls()\n try:\n obj.load(path)\n except ValueError:\n raise RuntimeError('%s is not a valid JSON file.' % path)\n return obj\n"
] | class Compose(object):
"""
This class provides easy access to compose metadata.
:param compose_path: Path to a compose. HTTP(s) URL is also accepted.
:type compose_path: str
"""
def __init__(self, compose_path):
# example: MYPRODUCT-1.0-YYYYMMDD.0/metadata
self.compose_path = compose_path
# example: MYPRODUCT-1.0-YYYYMMDD.0/compose/metadata (preferred location)
path = os.path.join(compose_path, "compose")
if _file_exists(path):
self.compose_path = path
elif "://" not in compose_path and os.path.exists(compose_path):
# Scan all subdirs under compose_path for 'metadata'. Doesn't work over HTTP.
# example: MYPRODUCT-1.0-YYYYMMDD.0/1.0/metadata (legacy location)
for i in os.listdir(compose_path):
path = os.path.join(compose_path, i)
metadata_path = os.path.join(path, "metadata")
if _file_exists(metadata_path):
self.compose_path = path
break
self._composeinfo = None
self._images = None
self._rpms = None
self._modules = None
def _find_metadata_file(self, paths):
for i in paths:
path = os.path.join(self.compose_path, i)
if _file_exists(path):
return path
raise RuntimeError('Failed to load metadata from %s' % self.compose_path)
@property
def info(self):
"""(:class:`productmd.composeinfo.ComposeInfo`) -- Compose metadata"""
if self._composeinfo is not None:
return self._composeinfo
paths = [
"metadata/composeinfo.json",
]
self._composeinfo = self._load_metadata(paths, productmd.composeinfo.ComposeInfo)
return self._composeinfo
@property
@property
def rpms(self):
"""(:class:`productmd.rpms.Rpms`) -- Compose RPMs metadata"""
if self._rpms is not None:
return self._rpms
paths = [
"metadata/rpms.json",
"metadata/rpm-manifest.json",
]
self._rpms = self._load_metadata(paths, productmd.rpms.Rpms)
return self._rpms
@property
def modules(self):
"""(:class:`productmd.modules.Modules`) -- Compose Modules metadata"""
if self._modules is not None:
return self._modules
paths = [
"metadata/modules.json",
]
self._modules = self._load_metadata(paths, productmd.modules.Modules)
return self._modules
def _load_metadata(self, paths, cls):
path = self._find_metadata_file(paths)
obj = cls()
try:
obj.load(path)
except ValueError:
raise RuntimeError('%s is not a valid JSON file.' % path)
return obj
|
release-engineering/productmd | productmd/compose.py | Compose.rpms | python | def rpms(self):
if self._rpms is not None:
return self._rpms
paths = [
"metadata/rpms.json",
"metadata/rpm-manifest.json",
]
self._rpms = self._load_metadata(paths, productmd.rpms.Rpms)
return self._rpms | (:class:`productmd.rpms.Rpms`) -- Compose RPMs metadata | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/compose.py#L117-L127 | [
"def _load_metadata(self, paths, cls):\n path = self._find_metadata_file(paths)\n obj = cls()\n try:\n obj.load(path)\n except ValueError:\n raise RuntimeError('%s is not a valid JSON file.' % path)\n return obj\n"
] | class Compose(object):
"""
This class provides easy access to compose metadata.
:param compose_path: Path to a compose. HTTP(s) URL is also accepted.
:type compose_path: str
"""
def __init__(self, compose_path):
# example: MYPRODUCT-1.0-YYYYMMDD.0/metadata
self.compose_path = compose_path
# example: MYPRODUCT-1.0-YYYYMMDD.0/compose/metadata (preferred location)
path = os.path.join(compose_path, "compose")
if _file_exists(path):
self.compose_path = path
elif "://" not in compose_path and os.path.exists(compose_path):
# Scan all subdirs under compose_path for 'metadata'. Doesn't work over HTTP.
# example: MYPRODUCT-1.0-YYYYMMDD.0/1.0/metadata (legacy location)
for i in os.listdir(compose_path):
path = os.path.join(compose_path, i)
metadata_path = os.path.join(path, "metadata")
if _file_exists(metadata_path):
self.compose_path = path
break
self._composeinfo = None
self._images = None
self._rpms = None
self._modules = None
def _find_metadata_file(self, paths):
for i in paths:
path = os.path.join(self.compose_path, i)
if _file_exists(path):
return path
raise RuntimeError('Failed to load metadata from %s' % self.compose_path)
@property
def info(self):
"""(:class:`productmd.composeinfo.ComposeInfo`) -- Compose metadata"""
if self._composeinfo is not None:
return self._composeinfo
paths = [
"metadata/composeinfo.json",
]
self._composeinfo = self._load_metadata(paths, productmd.composeinfo.ComposeInfo)
return self._composeinfo
@property
def images(self):
"""(:class:`productmd.images.Images`) -- Compose images metadata"""
if self._images is not None:
return self._images
paths = [
"metadata/images.json",
"metadata/image-manifest.json",
]
self._images = self._load_metadata(paths, productmd.images.Images)
return self._images
@property
@property
def modules(self):
"""(:class:`productmd.modules.Modules`) -- Compose Modules metadata"""
if self._modules is not None:
return self._modules
paths = [
"metadata/modules.json",
]
self._modules = self._load_metadata(paths, productmd.modules.Modules)
return self._modules
def _load_metadata(self, paths, cls):
path = self._find_metadata_file(paths)
obj = cls()
try:
obj.load(path)
except ValueError:
raise RuntimeError('%s is not a valid JSON file.' % path)
return obj
|
release-engineering/productmd | productmd/compose.py | Compose.modules | python | def modules(self):
if self._modules is not None:
return self._modules
paths = [
"metadata/modules.json",
]
self._modules = self._load_metadata(paths, productmd.modules.Modules)
return self._modules | (:class:`productmd.modules.Modules`) -- Compose Modules metadata | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/compose.py#L130-L139 | [
"def _load_metadata(self, paths, cls):\n path = self._find_metadata_file(paths)\n obj = cls()\n try:\n obj.load(path)\n except ValueError:\n raise RuntimeError('%s is not a valid JSON file.' % path)\n return obj\n"
] | class Compose(object):
"""
This class provides easy access to compose metadata.
:param compose_path: Path to a compose. HTTP(s) URL is also accepted.
:type compose_path: str
"""
def __init__(self, compose_path):
# example: MYPRODUCT-1.0-YYYYMMDD.0/metadata
self.compose_path = compose_path
# example: MYPRODUCT-1.0-YYYYMMDD.0/compose/metadata (preferred location)
path = os.path.join(compose_path, "compose")
if _file_exists(path):
self.compose_path = path
elif "://" not in compose_path and os.path.exists(compose_path):
# Scan all subdirs under compose_path for 'metadata'. Doesn't work over HTTP.
# example: MYPRODUCT-1.0-YYYYMMDD.0/1.0/metadata (legacy location)
for i in os.listdir(compose_path):
path = os.path.join(compose_path, i)
metadata_path = os.path.join(path, "metadata")
if _file_exists(metadata_path):
self.compose_path = path
break
self._composeinfo = None
self._images = None
self._rpms = None
self._modules = None
def _find_metadata_file(self, paths):
for i in paths:
path = os.path.join(self.compose_path, i)
if _file_exists(path):
return path
raise RuntimeError('Failed to load metadata from %s' % self.compose_path)
@property
def info(self):
"""(:class:`productmd.composeinfo.ComposeInfo`) -- Compose metadata"""
if self._composeinfo is not None:
return self._composeinfo
paths = [
"metadata/composeinfo.json",
]
self._composeinfo = self._load_metadata(paths, productmd.composeinfo.ComposeInfo)
return self._composeinfo
@property
def images(self):
"""(:class:`productmd.images.Images`) -- Compose images metadata"""
if self._images is not None:
return self._images
paths = [
"metadata/images.json",
"metadata/image-manifest.json",
]
self._images = self._load_metadata(paths, productmd.images.Images)
return self._images
@property
def rpms(self):
"""(:class:`productmd.rpms.Rpms`) -- Compose RPMs metadata"""
if self._rpms is not None:
return self._rpms
paths = [
"metadata/rpms.json",
"metadata/rpm-manifest.json",
]
self._rpms = self._load_metadata(paths, productmd.rpms.Rpms)
return self._rpms
@property
def _load_metadata(self, paths, cls):
path = self._find_metadata_file(paths)
obj = cls()
try:
obj.load(path)
except ValueError:
raise RuntimeError('%s is not a valid JSON file.' % path)
return obj
|
release-engineering/productmd | productmd/composeinfo.py | BaseProduct._validate_version | python | def _validate_version(self):
self._assert_type("version", list(six.string_types))
self._assert_matches_re("version", [RELEASE_VERSION_RE]) | If the version starts with a digit, it must be a sematic-versioning
style string. | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/composeinfo.py#L422-L427 | null | class BaseProduct(productmd.common.MetadataBase):
"""
This class represents a base product a release is based on.
For example: Spacewalk 2.2 release requires Fedora 20 base product.
Information from this class is used only if release.is_layered is set.
"""
def __init__(self, metadata):
super(BaseProduct, self).__init__()
self._section = "base_product"
self._metadata = metadata
self.name = None #: (*str*) -- Product name, for example: "Fedora", "Red Hat Enterprise Linux"
self.version = None #: (*str*) -- Product version (typically major version), for example: "20", "7"
self.short = None #: (*str*) -- Product short name, for example: "f", "rhel"
self.type = None #: (*str*) -- Product type, for example: "ga", "eus"
def __repr__(self):
return u'<%s:%s:%s>' % (self.__class__.__name__, self.name, self.version)
def __cmp__(self, other):
if self.name != other.name:
raise ValueError("Comparing incompatible products: %s vs %s" % (self.name, other.name))
if self.short != other.short:
raise ValueError("Comparing incompatible products: %s vs %s" % (self.short, other.short))
if self.version != other.version:
return cmp(productmd.common.split_version(self.version), productmd.common.split_version(other.version))
return 0
def __str__(self):
return "%s-%s" % (self.short, self.version)
def _validate_name(self):
self._assert_type("name", list(six.string_types))
def _validate_short(self):
self._assert_type("short", list(six.string_types))
def _validate_type(self):
self._assert_type("type", list(six.string_types))
self._assert_value("type", productmd.common.RELEASE_TYPES)
@property
def major_version(self):
if self.version is None:
return None
return productmd.common.get_major_version(self.version)
@property
def minor_version(self):
if self.version is None:
return None
return productmd.common.get_minor_version(self.version)
@property
def type_suffix(self):
"""This is used in compose ID."""
if not self.type or self.type.lower() == 'ga':
return ''
return '-%s' % self.type.lower()
def serialize(self, data):
self.validate()
data[self._section] = {}
data[self._section]["name"] = self.name
data[self._section]["version"] = self.version
data[self._section]["short"] = self.short
data[self._section]["type"] = self.type
def deserialize(self, data):
self.name = data[self._section]["name"]
self.version = data[self._section]["version"]
self.short = data[self._section]["short"]
self.type = data[self._section].get("type", "ga")
self.validate()
|
release-engineering/productmd | productmd/composeinfo.py | BaseProduct.type_suffix | python | def type_suffix(self):
if not self.type or self.type.lower() == 'ga':
return ''
return '-%s' % self.type.lower() | This is used in compose ID. | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/composeinfo.py#L449-L453 | null | class BaseProduct(productmd.common.MetadataBase):
"""
This class represents a base product a release is based on.
For example: Spacewalk 2.2 release requires Fedora 20 base product.
Information from this class is used only if release.is_layered is set.
"""
def __init__(self, metadata):
super(BaseProduct, self).__init__()
self._section = "base_product"
self._metadata = metadata
self.name = None #: (*str*) -- Product name, for example: "Fedora", "Red Hat Enterprise Linux"
self.version = None #: (*str*) -- Product version (typically major version), for example: "20", "7"
self.short = None #: (*str*) -- Product short name, for example: "f", "rhel"
self.type = None #: (*str*) -- Product type, for example: "ga", "eus"
def __repr__(self):
return u'<%s:%s:%s>' % (self.__class__.__name__, self.name, self.version)
def __cmp__(self, other):
if self.name != other.name:
raise ValueError("Comparing incompatible products: %s vs %s" % (self.name, other.name))
if self.short != other.short:
raise ValueError("Comparing incompatible products: %s vs %s" % (self.short, other.short))
if self.version != other.version:
return cmp(productmd.common.split_version(self.version), productmd.common.split_version(other.version))
return 0
def __str__(self):
return "%s-%s" % (self.short, self.version)
def _validate_name(self):
self._assert_type("name", list(six.string_types))
def _validate_version(self):
"""If the version starts with a digit, it must be a sematic-versioning
style string.
"""
self._assert_type("version", list(six.string_types))
self._assert_matches_re("version", [RELEASE_VERSION_RE])
def _validate_short(self):
self._assert_type("short", list(six.string_types))
def _validate_type(self):
self._assert_type("type", list(six.string_types))
self._assert_value("type", productmd.common.RELEASE_TYPES)
@property
def major_version(self):
if self.version is None:
return None
return productmd.common.get_major_version(self.version)
@property
def minor_version(self):
if self.version is None:
return None
return productmd.common.get_minor_version(self.version)
@property
def serialize(self, data):
self.validate()
data[self._section] = {}
data[self._section]["name"] = self.name
data[self._section]["version"] = self.version
data[self._section]["short"] = self.short
data[self._section]["type"] = self.type
def deserialize(self, data):
self.name = data[self._section]["name"]
self.version = data[self._section]["version"]
self.short = data[self._section]["short"]
self.type = data[self._section].get("type", "ga")
self.validate()
|
release-engineering/productmd | productmd/composeinfo.py | VariantBase.get_variants | python | def get_variants(self, arch=None, types=None, recursive=False):
types = types or []
result = []
if "self" in types:
result.append(self)
for variant in six.itervalues(self.variants):
if types and variant.type not in types:
continue
if arch and arch not in variant.arches.union(["src"]):
continue
result.append(variant)
if recursive:
result.extend(variant.get_variants(types=[i for i in types if i != "self"], recursive=True))
result.sort(key=lambda x: x.uid)
return result | Return all variants of given arch and types.
Supported variant types:
self - include the top-level ("self") variant as well
addon
variant
optional | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/composeinfo.py#L605-L631 | null | class VariantBase(productmd.common.MetadataBase):
def __init__(self, metadata):
super(VariantBase, self).__init__()
self._metadata = metadata
self.parent = None
self.variants = {}
def __repr__(self):
if hasattr(self, "compose"):
return u'<%s:%s>' % (self.__class__.__name__, self._metadata.compose.id)
else:
return super(VariantBase, self).__repr__()
def __getitem__(self, name):
# There can be exceptions, like $variant-optional on top-level,
# because optional lives in a separate tree
if name not in self.variants and "-" in name:
# look for the UID first
for i in self.variants:
var = self.variants[i]
if var.uid == name:
return var
# if UID is not found, split and look for variant matching the parts
head, tail = name.split("-", 1)
return self.variants[head][tail]
return self.variants[name]
def __delitem__(self, name):
if name not in self.variants and "-" in name:
head, tail = name.split("-", 1)
del self.variants[head][tail]
else:
del self.variants[name]
def __iter__(self):
for i in sorted(self.variants.keys()):
yield i
def __len__(self):
return len(self.variants)
def _validate_variants(self):
for variant_id in self:
variant = self[variant_id]
if variant.id != variant_id:
raise ValueError("Variant ID doesn't match: '%s' vs '%s'" % (variant.id, variant_id))
def add(self, variant, variant_id=None):
if hasattr(self, "uid"):
# detect Variant; we don't want to set parent for VariantBase or Variants
variant.parent = self
variant.validate()
variant_id = variant_id or variant.id
if hasattr(self, "parent"):
parents = self._get_all_parents()
if variant in parents:
parent_uids = sorted([i.uid for i in parents])
raise ValueError("Dependency cycle detected; variant %s; parents: %s" % (variant.uid, parent_uids))
new_variant = self.variants.setdefault(variant_id, variant)
if new_variant != variant:
raise ValueError("Variant ID already exists: %s" % variant.id)
def _get_all_parents(self):
result = [self]
if self.parent:
result.extend(self.parent._get_all_parents())
return result
|
release-engineering/productmd | productmd/rpms.py | Rpms.add | python | def add(self, variant, arch, nevra, path, sigkey, category, srpm_nevra=None):
if arch not in productmd.common.RPM_ARCHES:
raise ValueError("Arch not found in RPM_ARCHES: %s" % arch)
if arch in ["src", "nosrc"]:
raise ValueError("Source arch is not allowed. Map source files under binary arches.")
if category not in SUPPORTED_CATEGORIES:
raise ValueError("Invalid category value: %s" % category)
if path.startswith("/"):
raise ValueError("Relative path expected: %s" % path)
nevra, nevra_dict = self._check_nevra(nevra)
if category == "source" and srpm_nevra is not None:
raise ValueError("Expected blank srpm_nevra for source package: %s" % nevra)
if category != "source" and srpm_nevra is None:
raise ValueError("Missing srpm_nevra for package: %s" % nevra)
if (category == "source") != (nevra_dict["arch"] in ("src", "nosrc")):
raise ValueError("Invalid category/arch combination: %s/%s" % (category, nevra))
if sigkey is not None:
sigkey = sigkey.lower()
if srpm_nevra:
srpm_nevra, _ = self._check_nevra(srpm_nevra)
else:
srpm_nevra = nevra
arches = self.rpms.setdefault(variant, {})
srpms = arches.setdefault(arch, {})
rpms = srpms.setdefault(srpm_nevra, {})
rpms[nevra] = {"sigkey": sigkey, "path": path, "category": category} | Map RPM to to variant and arch.
:param variant: compose variant UID
:type variant: str
:param arch: compose architecture
:type arch: str
:param nevra: name-epoch:version-release.arch
:type nevra: str
:param sigkey: sigkey hash
:type sigkey: str or None
:param category: RPM category, one of binary, debug, source
:type category: str
:param srpm_nevra: name-epoch:version-release.arch of RPM's SRPM
:type srpm_nevra: str | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/rpms.py#L133-L185 | [
"def _check_nevra(self, nevra):\n if \":\" not in nevra:\n raise ValueError(\"Missing epoch in N-E:V-R.A: %s\" % nevra)\n\n try:\n nevra_dict = productmd.common.parse_nvra(nevra)\n except ValueError:\n raise ValueError(\"Invalid N-E:V-R.A: %s\" % nevra)\n\n nevra_dict[\"epoch\"] = n... | class Rpms(productmd.common.MetadataBase):
def __init__(self):
super(Rpms, self).__init__()
self.header = Header(self, "productmd.rpms")
self.compose = Compose(self)
self.rpms = {}
def __getitem__(self, variant):
return self.rpms[variant]
def __delitem__(self, variant):
del self.rpms[variant]
def _check_nevra(self, nevra):
if ":" not in nevra:
raise ValueError("Missing epoch in N-E:V-R.A: %s" % nevra)
try:
nevra_dict = productmd.common.parse_nvra(nevra)
except ValueError:
raise ValueError("Invalid N-E:V-R.A: %s" % nevra)
nevra_dict["epoch"] = nevra_dict["epoch"] or 0
nevra = "%(name)s-%(epoch)s:%(version)s-%(release)s.%(arch)s" % nevra_dict
return nevra, nevra_dict
def serialize(self, parser):
data = parser
self.header.serialize(data)
data["payload"] = {}
data["payload"]["rpms"] = {}
self.compose.serialize(data["payload"])
data["payload"]["rpms"] = self.rpms
return data
def deserialize(self, data):
self.header.deserialize(data)
if self.header.version_tuple <= (0, 3):
self.deserialize_0_3(data)
else:
self.deserialize_1_0(data)
self.validate()
self.header.set_current_version()
def deserialize_0_3(self, data):
self.compose.deserialize(data["payload"])
payload = data["payload"]["manifest"]
self.rpms = {}
for variant in payload:
for arch in payload[variant]:
if arch == "src":
continue
for srpm_nevra, rpms in payload[variant][arch].items():
srpm_data = payload[variant].get("src", {}).get(srpm_nevra, None)
for rpm_nevra, rpm_data in rpms.items():
category = rpm_data["type"]
if category == "package":
category = "binary"
self.add(variant, arch, rpm_nevra, rpm_data["path"], rpm_data["sigkey"], category, srpm_nevra)
if srpm_data is not None:
self.add(variant, arch, srpm_nevra, srpm_data["path"], srpm_data["sigkey"], "source")
def deserialize_1_0(self, data):
self.compose.deserialize(data["payload"])
self.rpms = data["payload"]["rpms"]
|
release-engineering/productmd | productmd/images.py | identify_image | python | def identify_image(image):
try:
# Image instance case
attrs = tuple(getattr(image, attr) for attr in UNIQUE_IMAGE_ATTRIBUTES)
except AttributeError:
# Plain dict case
attrs = tuple(image.get(attr, None) for attr in UNIQUE_IMAGE_ATTRIBUTES)
ui = UniqueImage(*attrs)
# If unified is None (which could happen in the dict case, we want default
# value of False instead. Also convert additional_variants to a list.
return ui._replace(
unified=ui.unified or False, additional_variants=ui.additional_variants or []
) | Provides a tuple of image's UNIQUE_IMAGE_ATTRIBUTES. Note:
this is not guaranteed to be unique (and will often not be)
for pre-1.1 metadata, as subvariant did not exist. Provided as
a function so consumers can use it on plain image dicts read from
the metadata or PDC. | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/images.py#L207-L225 | null | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
This module provides classes for manipulating images.json files.
images.json files provide details about images included in composes.
Example::
import productmd.compose
compose = productmd.compose.Compose("/path/to/compose")
# Print the entire dict that maps all variants, arches, and images for this
# compose:
print(compose.images.images)
# Find all the qcow2 images in this compose:
qcow2s = set()
for variant in compose.images.images:
for arch in compose.images.images[variant]:
for images in compose.images.images[variant].values():
if image.type == 'qcow2':
qcow2s.add(image)
print(qcow2s)
# ... prints the set of qcow2 images in all our variants:
[<Image:Server-RT/x86_64/images/rhel-kvm-rt-image-7.6-220.x86_64.qcow2:qcow2:x86_64>,
<Image:Server/x86_64/images/rhel-guest-image-7.6-210.x86_64.qcow2:qcow2:x86_64>,
<Image:Server/ppc64le/images/rhel-guest-image-7.6-210.ppc64le.qcow2:qcow2:ppc64le>]
"""
import productmd.common
from productmd.common import Header
from productmd.composeinfo import Compose
from collections import namedtuple
from itertools import chain
import six
__all__ = (
"Image",
"Images",
"IMAGE_TYPE_FORMAT_MAPPING"
"SUPPORTED_IMAGE_TYPES",
"SUPPORTED_IMAGE_FORMATS",
"UNIQUE_IMAGE_ATTRIBUTES",
"UniqueImage",
)
IMAGE_TYPE_FORMAT_MAPPING = {
'boot': ['iso'],
'cd': ['iso'],
'docker': ['tar.gz', 'tar.xz'],
'dvd': ['iso'],
# Usually non-bootable image which contains a repo with debuginfo packages.
'dvd-debuginfo': ['iso'],
# installer image that deploys a payload containing an ostree-based
# distribution
'dvd-ostree': ['iso'],
'ec2': [],
'kvm': [],
'live': [],
'liveimg-squashfs': ['liveimg.squashfs'],
'netinst': ['iso'],
'p2v': [],
'qcow': ['qcow'],
'qcow2': ['qcow2'],
'raw': ['raw'],
'raw-xz': ['raw.xz'],
'rescue': [],
'rhevm-ova': ['rhevm.ova'],
# raw disk image named `disk.raw` stuffed into a gzipped tarball
# format required for import by Google Compute Engine:
# https://cloud.google.com/compute/docs/images/import-existing-image
'tar-gz': ['tar.gz'],
'vagrant-hyperv': ['vagrant-hyperv.box'],
'vagrant-libvirt': ['vagrant-libvirt.box'],
'vagrant-virtualbox': ['vagrant-virtualbox.box'],
'vagrant-vmware-fusion': ['vagrant-vmware-fusion.box'],
'vdi': ['vdi'],
'vmdk': ['vmdk'],
'vpc': ['vpc'],
'vsphere-ova': ['vsphere.ova'],
}
#: supported image types
SUPPORTED_IMAGE_TYPES = list(sorted(IMAGE_TYPE_FORMAT_MAPPING.keys()))
#: supported image formats, they match with file suffix
SUPPORTED_IMAGE_FORMATS = list(sorted(set(chain(*IMAGE_TYPE_FORMAT_MAPPING.values()))))
#: combination of attributes which uniquely identifies an image across composes
UNIQUE_IMAGE_ATTRIBUTES = [
"subvariant",
"type",
"format",
"arch",
"disc_number",
"unified",
"additional_variants",
]
#: a namedtuple with unique attributes, use ``identify_image`` to create an instance
UniqueImage = namedtuple('UniqueImage', UNIQUE_IMAGE_ATTRIBUTES)
class Images(productmd.common.MetadataBase):
def __init__(self):
super(Images, self).__init__()
self.header = Header(self, "productmd.images")
self.compose = Compose(self)
self.images = {}
def __getitem__(self, variant):
return self.images[variant]
def __delitem__(self, variant):
del self.images[variant]
def serialize(self, parser):
data = parser
self.header.serialize(data)
data["payload"] = {}
data["payload"]["images"] = {}
self.compose.serialize(data["payload"])
for variant in self.images:
for arch in self.images[variant]:
for image_obj in self.images[variant][arch]:
images = data["payload"]["images"].setdefault(variant, {}).setdefault(arch, [])
image_obj.serialize(images)
images.sort(key=lambda x: x["path"])
return data
def deserialize(self, data):
self.header.deserialize(data)
self.compose.deserialize(data["payload"])
for variant in data["payload"]["images"]:
for arch in data["payload"]["images"][variant]:
for image in data["payload"]["images"][variant][arch]:
image_obj = Image(self)
image_obj.deserialize(image)
if self.header.version_tuple <= (1, 1):
self._add_1_1(data, variant, arch, image_obj)
else:
self.add(variant, arch, image_obj)
self.header.set_current_version()
def _add_1_1(self, data, variant, arch, image):
if arch == "src":
# move src under binary arches
for variant_arch in data["payload"]["images"][variant]:
if variant_arch == "src":
continue
self.add(variant, variant_arch, image)
else:
self.add(variant, arch, image)
def add(self, variant, arch, image):
"""
Assign an :class:`.Image` object to variant and arch.
:param variant: compose variant UID
:type variant: str
:param arch: compose architecture
:type arch: str
:param image: image
:type image: :class:`.Image`
"""
if arch not in productmd.common.RPM_ARCHES:
raise ValueError("Arch not found in RPM_ARCHES: %s" % arch)
if arch in ["src", "nosrc"]:
raise ValueError("Source arch is not allowed. Map source files under binary arches.")
if self.header.version_tuple >= (1, 1):
# disallow adding a different image with same 'unique'
# attributes. can't do this pre-1.1 as we couldn't truly
# identify images before subvariant
for checkvar in self.images:
for checkarch in self.images[checkvar]:
for curimg in self.images[checkvar][checkarch]:
if identify_image(curimg) == identify_image(image) and curimg.checksums != image.checksums:
raise ValueError("Image {0} shares all UNIQUE_IMAGE_ATTRIBUTES with "
"image {1}! This is forbidden.".format(image, curimg))
self.images.setdefault(variant, {}).setdefault(arch, set()).add(image)
class Image(productmd.common.MetadataBase):
def __init__(self, parent):
super(Image, self).__init__()
self.parent = parent
self.path = None #: (*str*) -- relative path to an image, for example: "Server/x86_64/iso/boot.iso"
self.mtime = None #: (*int*) -- image mtime
self.size = None #: (*int*) -- image size
self.volume_id = None #: (*str*) --
self.type = None #: (*str*) --
self.format = None #: (*str*) -- Release name, for example: "Fedora", "Red Hat Enterprise Linux"
self.arch = None #: (*str*) -- image architecture, for example: "x86_64", "src"
self.disc_number = None #: (*int*) -- Release name, for example: "Fedora", "Red Hat Enterprise Linux"
self.disc_count = None #: (*int*) -- Release name, for example: "Fedora", "Red Hat Enterprise Linux"
self.checksums = {} #: (*str*) -- Release name, for example: "Fedora", "Red Hat Enterprise Linux"
self.implant_md5 = None #: (*str* or *None*) -- value of implanted md5
self.bootable = False #: (*bool=False*) --
self.subvariant = None #: (*str*) -- image contents, may be same as variant or e.g. 'KDE', 'LXDE'
self.unified = False #: (*bool=False*) -- indicates if the ISO contains content from multiple variants
self.additional_variants = [] #: (*[str]*) -- indicates which variants are present on the ISO
def __repr__(self):
return "<Image:{0.path}:{0.format}:{0.arch}>".format(self)
def _validate_path(self):
self._assert_type("path", list(six.string_types))
self._assert_not_blank("path")
def _validate_mtime(self):
self._assert_type("mtime", list(six.integer_types))
def _validate_size(self):
self._assert_type("size", list(six.integer_types))
self._assert_not_blank("size")
def _validate_volume_id(self):
self._assert_type("volume_id", [type(None)] + list(six.string_types))
if self.volume_id is not None:
self._assert_not_blank("volume_id")
def _validate_type(self):
self._assert_type("type", list(six.string_types))
self._assert_value("type", SUPPORTED_IMAGE_TYPES)
def _validate_format(self):
self._assert_type("format", list(six.string_types))
self._assert_value("format", SUPPORTED_IMAGE_FORMATS)
def _validate_arch(self):
self._assert_type("arch", list(six.string_types))
self._assert_not_blank("arch")
def _validate_disc_number(self):
self._assert_type("disc_number", list(six.integer_types))
def _validate_disc_count(self):
self._assert_type("disc_count", list(six.integer_types))
def _validate_checksums(self):
self._assert_type("checksums", [dict])
self._assert_not_blank("checksums")
def _validate_implant_md5(self):
self._assert_type("implant_md5", [type(None)] + list(six.string_types))
if self.implant_md5 is not None:
self._assert_matches_re("implant_md5", [r"^[a-z0-9]{32}$"])
def _validate_bootable(self):
self._assert_type("bootable", [bool])
def _validate_subvariant(self):
self._assert_type("subvariant", list(six.string_types))
def _validate_unified(self):
self._assert_type("unified", [bool])
def _validate_merges_variants(self):
self._assert_type("additional_variants", [list])
if self.additional_variants and not self.unified:
raise ValueError("Only unified images can contain multiple variants")
def serialize(self, parser):
data = parser
self.validate()
result = {
"path": self.path,
"mtime": self.mtime,
"size": self.size,
"volume_id": self.volume_id,
"type": self.type,
"format": self.format,
"arch": self.arch,
"disc_number": self.disc_number,
"disc_count": self.disc_count,
"checksums": self.checksums,
"implant_md5": self.implant_md5,
"bootable": self.bootable,
"subvariant": self.subvariant,
}
if self.unified:
# Only add the `unified` field if it doesn't have the default value.
result['unified'] = self.unified
result["additional_variants"] = self.additional_variants
data.append(result)
def deserialize(self, data):
self.path = data["path"]
self.mtime = int(data["mtime"])
self.size = int(data["size"])
self.volume_id = data["volume_id"]
self.type = data["type"]
self.format = data.get("format", "iso")
self.arch = data["arch"]
self.disc_number = int(data["disc_number"])
self.disc_count = int(data["disc_count"])
self.checksums = data["checksums"]
self.implant_md5 = data["implant_md5"]
self.bootable = bool(data["bootable"])
if self.parent.header.version_tuple <= (1, 0):
self.subvariant = data.get("subvariant", "")
else:
# 1.1+
self.subvariant = data["subvariant"]
self.unified = data.get('unified', False)
self.additional_variants = data.get("additional_variants", [])
self.validate()
def add_checksum(self, root, checksum_type, checksum_value):
if checksum_type in self.checksums:
if checksum_value and checksum_value != self.checksums[checksum_type]:
raise ValueError("Existing and added checksums do not match: %s vs %s" % (self.checksums[checksum_type], checksum_value))
return self.checksums[checksum_type]
self.checksums[checksum_type] = checksum_value
return checksum_value
|
release-engineering/productmd | productmd/images.py | Images.add | python | def add(self, variant, arch, image):
if arch not in productmd.common.RPM_ARCHES:
raise ValueError("Arch not found in RPM_ARCHES: %s" % arch)
if arch in ["src", "nosrc"]:
raise ValueError("Source arch is not allowed. Map source files under binary arches.")
if self.header.version_tuple >= (1, 1):
# disallow adding a different image with same 'unique'
# attributes. can't do this pre-1.1 as we couldn't truly
# identify images before subvariant
for checkvar in self.images:
for checkarch in self.images[checkvar]:
for curimg in self.images[checkvar][checkarch]:
if identify_image(curimg) == identify_image(image) and curimg.checksums != image.checksums:
raise ValueError("Image {0} shares all UNIQUE_IMAGE_ATTRIBUTES with "
"image {1}! This is forbidden.".format(image, curimg))
self.images.setdefault(variant, {}).setdefault(arch, set()).add(image) | Assign an :class:`.Image` object to variant and arch.
:param variant: compose variant UID
:type variant: str
:param arch: compose architecture
:type arch: str
:param image: image
:type image: :class:`.Image` | train | https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/images.py#L178-L204 | [
"def identify_image(image):\n \"\"\"Provides a tuple of image's UNIQUE_IMAGE_ATTRIBUTES. Note:\n this is not guaranteed to be unique (and will often not be)\n for pre-1.1 metadata, as subvariant did not exist. Provided as\n a function so consumers can use it on plain image dicts read from\n the metad... | class Images(productmd.common.MetadataBase):
def __init__(self):
super(Images, self).__init__()
self.header = Header(self, "productmd.images")
self.compose = Compose(self)
self.images = {}
def __getitem__(self, variant):
return self.images[variant]
def __delitem__(self, variant):
del self.images[variant]
def serialize(self, parser):
data = parser
self.header.serialize(data)
data["payload"] = {}
data["payload"]["images"] = {}
self.compose.serialize(data["payload"])
for variant in self.images:
for arch in self.images[variant]:
for image_obj in self.images[variant][arch]:
images = data["payload"]["images"].setdefault(variant, {}).setdefault(arch, [])
image_obj.serialize(images)
images.sort(key=lambda x: x["path"])
return data
def deserialize(self, data):
self.header.deserialize(data)
self.compose.deserialize(data["payload"])
for variant in data["payload"]["images"]:
for arch in data["payload"]["images"][variant]:
for image in data["payload"]["images"][variant][arch]:
image_obj = Image(self)
image_obj.deserialize(image)
if self.header.version_tuple <= (1, 1):
self._add_1_1(data, variant, arch, image_obj)
else:
self.add(variant, arch, image_obj)
self.header.set_current_version()
def _add_1_1(self, data, variant, arch, image):
if arch == "src":
# move src under binary arches
for variant_arch in data["payload"]["images"][variant]:
if variant_arch == "src":
continue
self.add(variant, variant_arch, image)
else:
self.add(variant, arch, image)
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/usefulFunctions.py | uniqueStates | python | def uniqueStates(states,rates):
"""
Returns unique states and sums up the corresponding rates.
States should be a 2d numpy array with on each row a state, and rates a 1d numpy array with length equal to the number of rows in states.
This may be helpful in the transition function for summing up the rates of different transitions that lead to the same state
"""
order = np.lexsort(states.T)
states = states[order]
diff = np.ones(len(states), 'bool')
diff[1:] = (states[1:] != states[:-1]).any(-1)
sums = np.bincount(diff.cumsum() - 1, rates[order])
return states[diff], sums | Returns unique states and sums up the corresponding rates.
States should be a 2d numpy array with on each row a state, and rates a 1d numpy array with length equal to the number of rows in states.
This may be helpful in the transition function for summing up the rates of different transitions that lead to the same state | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/usefulFunctions.py#L4-L16 | null | from __future__ import print_function
import numpy as np
def uniqueStates(states,rates):
"""
Returns unique states and sums up the corresponding rates.
States should be a 2d numpy array with on each row a state, and rates a 1d numpy array with length equal to the number of rows in states.
This may be helpful in the transition function for summing up the rates of different transitions that lead to the same state
"""
order = np.lexsort(states.T)
states = states[order]
diff = np.ones(len(states), 'bool')
diff[1:] = (states[1:] != states[:-1]).any(-1)
sums = np.bincount(diff.cumsum() - 1, rates[order])
return states[diff], sums
def number_of_partitions(max_range, max_sum):
'''
Returns an array arr of the same shape as max_range, where
arr[j] = number of admissible partitions for
j summands bounded by max_range[j:] and with sum <= max_sum
'''
M = max_sum + 1
N = len(max_range)
arr = np.zeros(shape=(M,N), dtype = int)
arr[:,-1] = np.where(np.arange(M) <= min(max_range[-1], max_sum), 1, 0)
for i in range(N-2,-1,-1):
for j in range(max_range[i]+1):
arr[j:,i] += arr[:M-j,i+1]
return arr.sum(axis = 0)
def partition_zero(max_range, max_sum, out = None, n_part = None):
'''
Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `0,...,max_range[j]` that add up to at most `max_sum`.
Code due to ptrj, see http://stackoverflow.com/a/36563744/1479342.
Parameters
----------
max_range : array or list of ints
Gives the ranges for each element in the output array. Element `j` has range `np.arange(max_range[j]+1)`.
max_sum : int
The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `0,...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> max_range=np.array([1,3,2])
>>> max_sum = 3
>>> partition_zero(max_range,max_sum)
array([[0, 0, 0],
[0, 0, 1],
[0, 0, 2],
[0, 1, 0],
[0, 1, 1],
[0, 1, 2],
[0, 2, 0],
[0, 2, 1],
[0, 3, 0],
[1, 0, 0],
[1, 0, 1],
[1, 0, 2],
[1, 1, 0],
[1, 1, 1],
[1, 2, 0]])
'''
if out is None:
max_range = np.asarray(max_range, dtype = int).ravel()
n_part = number_of_partitions(max_range, max_sum)
out = np.zeros(shape = (n_part[0], max_range.size), dtype = int)
if(max_range.size == 1):
out[:] = np.arange(min(max_range[0],max_sum) + 1, dtype = int).reshape(-1,1)
return out
P = partition_zero(max_range[1:], max_sum, out=out[:n_part[1],1:], n_part = n_part[1:])
S = np.minimum(max_sum - P.sum(axis = 1), max_range[0])
offset, sz = 0, S.size
out[:sz,0] = 0
for i in range(1, max_range[0]+1):
ind, = np.nonzero(S)
offset, sz = offset + sz, ind.size
out[offset:offset+sz, 0] = i
out[offset:offset+sz, 1:] = P[ind]
S[ind] -= 1
return out
def partition(min_range,max_range,max_sum=None):
'''
Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `min_range[j],...,max_range[j]` for each j.
If the argument `max_sum` is specified, the numbers will add up to at most `max_sum`
Parameters
----------
min_range : array or list of ints
`min_range[j]` gives the minimum value for element `j` in the output array.
max_range : array or list of ints
`max_range[j]` gives the maximum value for element `j` in the output array.
max_sum : int
Optional argument. The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `min_range[j],...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> min_range=np.array([1,2,1])
>>> max_range=np.array([2,4,3])
>>> max_sum = 6
>>> partition(min_range,max_range,max_sum)
array([[1, 2, 1],
[1, 2, 2],
[1, 2, 3],
[1, 3, 1],
[1, 3, 2],
[1, 4, 1],
[2, 2, 1],
[2, 2, 2],
[2, 3, 1]])
'''
max_range = np.asarray(max_range, dtype = int).ravel()
min_range = np.asarray(min_range, dtype = int).ravel()
full_range = max_range-min_range
if any(full_range<0):
raise ValueError("max_range needs to be larger than min_range")
if max_sum == None:
max_sum = np.sum(full_range)
else:
max_sum -= np.sum(min_range)
out = partition_zero(full_range,max_sum)
out += min_range
return out |
gvanderheide/discreteMarkovChain | discreteMarkovChain/usefulFunctions.py | number_of_partitions | python | def number_of_partitions(max_range, max_sum):
'''
Returns an array arr of the same shape as max_range, where
arr[j] = number of admissible partitions for
j summands bounded by max_range[j:] and with sum <= max_sum
'''
M = max_sum + 1
N = len(max_range)
arr = np.zeros(shape=(M,N), dtype = int)
arr[:,-1] = np.where(np.arange(M) <= min(max_range[-1], max_sum), 1, 0)
for i in range(N-2,-1,-1):
for j in range(max_range[i]+1):
arr[j:,i] += arr[:M-j,i+1]
return arr.sum(axis = 0) | Returns an array arr of the same shape as max_range, where
arr[j] = number of admissible partitions for
j summands bounded by max_range[j:] and with sum <= max_sum | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/usefulFunctions.py#L18-L31 | null | from __future__ import print_function
import numpy as np
def uniqueStates(states,rates):
"""
Returns unique states and sums up the corresponding rates.
States should be a 2d numpy array with on each row a state, and rates a 1d numpy array with length equal to the number of rows in states.
This may be helpful in the transition function for summing up the rates of different transitions that lead to the same state
"""
order = np.lexsort(states.T)
states = states[order]
diff = np.ones(len(states), 'bool')
diff[1:] = (states[1:] != states[:-1]).any(-1)
sums = np.bincount(diff.cumsum() - 1, rates[order])
return states[diff], sums
def partition_zero(max_range, max_sum, out = None, n_part = None):
'''
Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `0,...,max_range[j]` that add up to at most `max_sum`.
Code due to ptrj, see http://stackoverflow.com/a/36563744/1479342.
Parameters
----------
max_range : array or list of ints
Gives the ranges for each element in the output array. Element `j` has range `np.arange(max_range[j]+1)`.
max_sum : int
The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `0,...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> max_range=np.array([1,3,2])
>>> max_sum = 3
>>> partition_zero(max_range,max_sum)
array([[0, 0, 0],
[0, 0, 1],
[0, 0, 2],
[0, 1, 0],
[0, 1, 1],
[0, 1, 2],
[0, 2, 0],
[0, 2, 1],
[0, 3, 0],
[1, 0, 0],
[1, 0, 1],
[1, 0, 2],
[1, 1, 0],
[1, 1, 1],
[1, 2, 0]])
'''
if out is None:
max_range = np.asarray(max_range, dtype = int).ravel()
n_part = number_of_partitions(max_range, max_sum)
out = np.zeros(shape = (n_part[0], max_range.size), dtype = int)
if(max_range.size == 1):
out[:] = np.arange(min(max_range[0],max_sum) + 1, dtype = int).reshape(-1,1)
return out
P = partition_zero(max_range[1:], max_sum, out=out[:n_part[1],1:], n_part = n_part[1:])
S = np.minimum(max_sum - P.sum(axis = 1), max_range[0])
offset, sz = 0, S.size
out[:sz,0] = 0
for i in range(1, max_range[0]+1):
ind, = np.nonzero(S)
offset, sz = offset + sz, ind.size
out[offset:offset+sz, 0] = i
out[offset:offset+sz, 1:] = P[ind]
S[ind] -= 1
return out
def partition(min_range,max_range,max_sum=None):
'''
Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `min_range[j],...,max_range[j]` for each j.
If the argument `max_sum` is specified, the numbers will add up to at most `max_sum`
Parameters
----------
min_range : array or list of ints
`min_range[j]` gives the minimum value for element `j` in the output array.
max_range : array or list of ints
`max_range[j]` gives the maximum value for element `j` in the output array.
max_sum : int
Optional argument. The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `min_range[j],...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> min_range=np.array([1,2,1])
>>> max_range=np.array([2,4,3])
>>> max_sum = 6
>>> partition(min_range,max_range,max_sum)
array([[1, 2, 1],
[1, 2, 2],
[1, 2, 3],
[1, 3, 1],
[1, 3, 2],
[1, 4, 1],
[2, 2, 1],
[2, 2, 2],
[2, 3, 1]])
'''
max_range = np.asarray(max_range, dtype = int).ravel()
min_range = np.asarray(min_range, dtype = int).ravel()
full_range = max_range-min_range
if any(full_range<0):
raise ValueError("max_range needs to be larger than min_range")
if max_sum == None:
max_sum = np.sum(full_range)
else:
max_sum -= np.sum(min_range)
out = partition_zero(full_range,max_sum)
out += min_range
return out |
gvanderheide/discreteMarkovChain | discreteMarkovChain/usefulFunctions.py | partition_zero | python | def partition_zero(max_range, max_sum, out = None, n_part = None):
'''
Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `0,...,max_range[j]` that add up to at most `max_sum`.
Code due to ptrj, see http://stackoverflow.com/a/36563744/1479342.
Parameters
----------
max_range : array or list of ints
Gives the ranges for each element in the output array. Element `j` has range `np.arange(max_range[j]+1)`.
max_sum : int
The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `0,...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> max_range=np.array([1,3,2])
>>> max_sum = 3
>>> partition_zero(max_range,max_sum)
array([[0, 0, 0],
[0, 0, 1],
[0, 0, 2],
[0, 1, 0],
[0, 1, 1],
[0, 1, 2],
[0, 2, 0],
[0, 2, 1],
[0, 3, 0],
[1, 0, 0],
[1, 0, 1],
[1, 0, 2],
[1, 1, 0],
[1, 1, 1],
[1, 2, 0]])
'''
if out is None:
max_range = np.asarray(max_range, dtype = int).ravel()
n_part = number_of_partitions(max_range, max_sum)
out = np.zeros(shape = (n_part[0], max_range.size), dtype = int)
if(max_range.size == 1):
out[:] = np.arange(min(max_range[0],max_sum) + 1, dtype = int).reshape(-1,1)
return out
P = partition_zero(max_range[1:], max_sum, out=out[:n_part[1],1:], n_part = n_part[1:])
S = np.minimum(max_sum - P.sum(axis = 1), max_range[0])
offset, sz = 0, S.size
out[:sz,0] = 0
for i in range(1, max_range[0]+1):
ind, = np.nonzero(S)
offset, sz = offset + sz, ind.size
out[offset:offset+sz, 0] = i
out[offset:offset+sz, 1:] = P[ind]
S[ind] -= 1
return out | Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `0,...,max_range[j]` that add up to at most `max_sum`.
Code due to ptrj, see http://stackoverflow.com/a/36563744/1479342.
Parameters
----------
max_range : array or list of ints
Gives the ranges for each element in the output array. Element `j` has range `np.arange(max_range[j]+1)`.
max_sum : int
The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `0,...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> max_range=np.array([1,3,2])
>>> max_sum = 3
>>> partition_zero(max_range,max_sum)
array([[0, 0, 0],
[0, 0, 1],
[0, 0, 2],
[0, 1, 0],
[0, 1, 1],
[0, 1, 2],
[0, 2, 0],
[0, 2, 1],
[0, 3, 0],
[1, 0, 0],
[1, 0, 1],
[1, 0, 2],
[1, 1, 0],
[1, 1, 1],
[1, 2, 0]]) | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/usefulFunctions.py#L33-L93 | [
"def number_of_partitions(max_range, max_sum):\n '''\n Returns an array arr of the same shape as max_range, where\n arr[j] = number of admissible partitions for \n j summands bounded by max_range[j:] and with sum <= max_sum\n '''\n M = max_sum + 1\n N = len(max_range) \n arr = np.ze... | from __future__ import print_function
import numpy as np
def uniqueStates(states,rates):
"""
Returns unique states and sums up the corresponding rates.
States should be a 2d numpy array with on each row a state, and rates a 1d numpy array with length equal to the number of rows in states.
This may be helpful in the transition function for summing up the rates of different transitions that lead to the same state
"""
order = np.lexsort(states.T)
states = states[order]
diff = np.ones(len(states), 'bool')
diff[1:] = (states[1:] != states[:-1]).any(-1)
sums = np.bincount(diff.cumsum() - 1, rates[order])
return states[diff], sums
def number_of_partitions(max_range, max_sum):
'''
Returns an array arr of the same shape as max_range, where
arr[j] = number of admissible partitions for
j summands bounded by max_range[j:] and with sum <= max_sum
'''
M = max_sum + 1
N = len(max_range)
arr = np.zeros(shape=(M,N), dtype = int)
arr[:,-1] = np.where(np.arange(M) <= min(max_range[-1], max_sum), 1, 0)
for i in range(N-2,-1,-1):
for j in range(max_range[i]+1):
arr[j:,i] += arr[:M-j,i+1]
return arr.sum(axis = 0)
def partition_zero(max_range, max_sum, out = None, n_part = None):
'''
Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `0,...,max_range[j]` that add up to at most `max_sum`.
Code due to ptrj, see http://stackoverflow.com/a/36563744/1479342.
Parameters
----------
max_range : array or list of ints
Gives the ranges for each element in the output array. Element `j` has range `np.arange(max_range[j]+1)`.
max_sum : int
The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `0,...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> max_range=np.array([1,3,2])
>>> max_sum = 3
>>> partition_zero(max_range,max_sum)
array([[0, 0, 0],
[0, 0, 1],
[0, 0, 2],
[0, 1, 0],
[0, 1, 1],
[0, 1, 2],
[0, 2, 0],
[0, 2, 1],
[0, 3, 0],
[1, 0, 0],
[1, 0, 1],
[1, 0, 2],
[1, 1, 0],
[1, 1, 1],
[1, 2, 0]])
'''
if out is None:
max_range = np.asarray(max_range, dtype = int).ravel()
n_part = number_of_partitions(max_range, max_sum)
out = np.zeros(shape = (n_part[0], max_range.size), dtype = int)
if(max_range.size == 1):
out[:] = np.arange(min(max_range[0],max_sum) + 1, dtype = int).reshape(-1,1)
return out
P = partition_zero(max_range[1:], max_sum, out=out[:n_part[1],1:], n_part = n_part[1:])
S = np.minimum(max_sum - P.sum(axis = 1), max_range[0])
offset, sz = 0, S.size
out[:sz,0] = 0
for i in range(1, max_range[0]+1):
ind, = np.nonzero(S)
offset, sz = offset + sz, ind.size
out[offset:offset+sz, 0] = i
out[offset:offset+sz, 1:] = P[ind]
S[ind] -= 1
return out
def partition(min_range,max_range,max_sum=None):
'''
Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `min_range[j],...,max_range[j]` for each j.
If the argument `max_sum` is specified, the numbers will add up to at most `max_sum`
Parameters
----------
min_range : array or list of ints
`min_range[j]` gives the minimum value for element `j` in the output array.
max_range : array or list of ints
`max_range[j]` gives the maximum value for element `j` in the output array.
max_sum : int
Optional argument. The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `min_range[j],...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> min_range=np.array([1,2,1])
>>> max_range=np.array([2,4,3])
>>> max_sum = 6
>>> partition(min_range,max_range,max_sum)
array([[1, 2, 1],
[1, 2, 2],
[1, 2, 3],
[1, 3, 1],
[1, 3, 2],
[1, 4, 1],
[2, 2, 1],
[2, 2, 2],
[2, 3, 1]])
'''
max_range = np.asarray(max_range, dtype = int).ravel()
min_range = np.asarray(min_range, dtype = int).ravel()
full_range = max_range-min_range
if any(full_range<0):
raise ValueError("max_range needs to be larger than min_range")
if max_sum == None:
max_sum = np.sum(full_range)
else:
max_sum -= np.sum(min_range)
out = partition_zero(full_range,max_sum)
out += min_range
return out |
gvanderheide/discreteMarkovChain | discreteMarkovChain/usefulFunctions.py | partition | python | def partition(min_range,max_range,max_sum=None):
'''
Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `min_range[j],...,max_range[j]` for each j.
If the argument `max_sum` is specified, the numbers will add up to at most `max_sum`
Parameters
----------
min_range : array or list of ints
`min_range[j]` gives the minimum value for element `j` in the output array.
max_range : array or list of ints
`max_range[j]` gives the maximum value for element `j` in the output array.
max_sum : int
Optional argument. The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `min_range[j],...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> min_range=np.array([1,2,1])
>>> max_range=np.array([2,4,3])
>>> max_sum = 6
>>> partition(min_range,max_range,max_sum)
array([[1, 2, 1],
[1, 2, 2],
[1, 2, 3],
[1, 3, 1],
[1, 3, 2],
[1, 4, 1],
[2, 2, 1],
[2, 2, 2],
[2, 3, 1]])
'''
max_range = np.asarray(max_range, dtype = int).ravel()
min_range = np.asarray(min_range, dtype = int).ravel()
full_range = max_range-min_range
if any(full_range<0):
raise ValueError("max_range needs to be larger than min_range")
if max_sum == None:
max_sum = np.sum(full_range)
else:
max_sum -= np.sum(min_range)
out = partition_zero(full_range,max_sum)
out += min_range
return out | Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `min_range[j],...,max_range[j]` for each j.
If the argument `max_sum` is specified, the numbers will add up to at most `max_sum`
Parameters
----------
min_range : array or list of ints
`min_range[j]` gives the minimum value for element `j` in the output array.
max_range : array or list of ints
`max_range[j]` gives the maximum value for element `j` in the output array.
max_sum : int
Optional argument. The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `min_range[j],...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> min_range=np.array([1,2,1])
>>> max_range=np.array([2,4,3])
>>> max_sum = 6
>>> partition(min_range,max_range,max_sum)
array([[1, 2, 1],
[1, 2, 2],
[1, 2, 3],
[1, 3, 1],
[1, 3, 2],
[1, 4, 1],
[2, 2, 1],
[2, 2, 2],
[2, 3, 1]]) | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/usefulFunctions.py#L95-L142 | [
"def partition_zero(max_range, max_sum, out = None, n_part = None):\n '''\n Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes. \n Returns a 2d-array with on the rows all possible partitions of the ranges `0,...,max_range[j]` that add up to at... | from __future__ import print_function
import numpy as np
def uniqueStates(states,rates):
"""
Returns unique states and sums up the corresponding rates.
States should be a 2d numpy array with on each row a state, and rates a 1d numpy array with length equal to the number of rows in states.
This may be helpful in the transition function for summing up the rates of different transitions that lead to the same state
"""
order = np.lexsort(states.T)
states = states[order]
diff = np.ones(len(states), 'bool')
diff[1:] = (states[1:] != states[:-1]).any(-1)
sums = np.bincount(diff.cumsum() - 1, rates[order])
return states[diff], sums
def number_of_partitions(max_range, max_sum):
'''
Returns an array arr of the same shape as max_range, where
arr[j] = number of admissible partitions for
j summands bounded by max_range[j:] and with sum <= max_sum
'''
M = max_sum + 1
N = len(max_range)
arr = np.zeros(shape=(M,N), dtype = int)
arr[:,-1] = np.where(np.arange(M) <= min(max_range[-1], max_sum), 1, 0)
for i in range(N-2,-1,-1):
for j in range(max_range[i]+1):
arr[j:,i] += arr[:M-j,i+1]
return arr.sum(axis = 0)
def partition_zero(max_range, max_sum, out = None, n_part = None):
'''
Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `0,...,max_range[j]` that add up to at most `max_sum`.
Code due to ptrj, see http://stackoverflow.com/a/36563744/1479342.
Parameters
----------
max_range : array or list of ints
Gives the ranges for each element in the output array. Element `j` has range `np.arange(max_range[j]+1)`.
max_sum : int
The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `0,...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> max_range=np.array([1,3,2])
>>> max_sum = 3
>>> partition_zero(max_range,max_sum)
array([[0, 0, 0],
[0, 0, 1],
[0, 0, 2],
[0, 1, 0],
[0, 1, 1],
[0, 1, 2],
[0, 2, 0],
[0, 2, 1],
[0, 3, 0],
[1, 0, 0],
[1, 0, 1],
[1, 0, 2],
[1, 1, 0],
[1, 1, 1],
[1, 2, 0]])
'''
if out is None:
max_range = np.asarray(max_range, dtype = int).ravel()
n_part = number_of_partitions(max_range, max_sum)
out = np.zeros(shape = (n_part[0], max_range.size), dtype = int)
if(max_range.size == 1):
out[:] = np.arange(min(max_range[0],max_sum) + 1, dtype = int).reshape(-1,1)
return out
P = partition_zero(max_range[1:], max_sum, out=out[:n_part[1],1:], n_part = n_part[1:])
S = np.minimum(max_sum - P.sum(axis = 1), max_range[0])
offset, sz = 0, S.size
out[:sz,0] = 0
for i in range(1, max_range[0]+1):
ind, = np.nonzero(S)
offset, sz = offset + sz, ind.size
out[offset:offset+sz, 0] = i
out[offset:offset+sz, 1:] = P[ind]
S[ind] -= 1
return out
def partition(min_range,max_range,max_sum=None):
'''
Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `min_range[j],...,max_range[j]` for each j.
If the argument `max_sum` is specified, the numbers will add up to at most `max_sum`
Parameters
----------
min_range : array or list of ints
`min_range[j]` gives the minimum value for element `j` in the output array.
max_range : array or list of ints
`max_range[j]` gives the maximum value for element `j` in the output array.
max_sum : int
Optional argument. The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `min_range[j],...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> min_range=np.array([1,2,1])
>>> max_range=np.array([2,4,3])
>>> max_sum = 6
>>> partition(min_range,max_range,max_sum)
array([[1, 2, 1],
[1, 2, 2],
[1, 2, 3],
[1, 3, 1],
[1, 3, 2],
[1, 4, 1],
[2, 2, 1],
[2, 2, 2],
[2, 3, 1]])
'''
max_range = np.asarray(max_range, dtype = int).ravel()
min_range = np.asarray(min_range, dtype = int).ravel()
full_range = max_range-min_range
if any(full_range<0):
raise ValueError("max_range needs to be larger than min_range")
if max_sum == None:
max_sum = np.sum(full_range)
else:
max_sum -= np.sum(min_range)
out = partition_zero(full_range,max_sum)
out += min_range
return out |
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.checkInitialState | python | def checkInitialState(self,initialState):
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState | Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L122-L155 | null | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
self.P = P
self.direct = direct
self.pi = None #steady state probability vector
self.mapping = {} #mapping used to identify states
self.initialState = None #a dummy initial state
@property
def size(self):
"""
Return the number of states in the state space, if ``self.mapping`` is defined.
"""
return len(self.mapping)
def statespace(self):
"""
To be provided by the subclass. Return the state space
in an integer 2d numpy array with a state on each row.
"""
raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
"""
To be provided by the subclass.
Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
Ensure that unique states are returned.
"""
raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
def checkTransitionType(self,state):
"""
Check whether the transition function returns output of the correct types.
This can be either a dictionary with as keys ints/tuples and values floats.
Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates.
"""
test = self.transition(state)
assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
if isinstance(test,dict):
assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
usesNumpy=False
if isinstance(test,tuple):
assert len(test)==2, "The transition function should return two variables: states and rates."
states,rates = test
assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
usesNumpy = True
return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
def indirectInitialMatrix(self, initialState):
"""
Given some initial state, this iteratively determines new states.
We repeatedly call the transition function on unvisited states in the frontier set.
Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.
"""
mapping = {}
rates = OrderedDict()
#Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
convertedState = self.checkInitialState(initialState)
if isinstance(convertedState,set):
#If initialstates is a set, include all states in the set in the mapping.
frontier = set( convertedState )
for idx,state in enumerate(convertedState):
mapping[state] = idx
if idx == 0: #Test the return type of the transition function (dict or numpy).
usesNumpy = self.checkTransitionType(initialState)
else:
#Otherwise include only the single state.
frontier = set( [convertedState] )
usesNumpy = self.checkTransitionType(initialState)
mapping[convertedState] = 0
while len(frontier) > 0:
fromstate = frontier.pop()
fromindex = mapping[fromstate]
if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
transitions = self.transition(np.array(fromstate))
transitions = self.convertToTransitionDict(transitions)
else:
transitions = self.transition(fromstate)
for tostate,rate in transitions.items():
if tostate not in mapping:
frontier.add(tostate)
mapping[tostate] = len(mapping)
toindex = mapping[tostate]
rates[(fromindex, toindex)] = rate
#Inverse the keys and values in mapping to get a dictionary with indices and states.
self.mapping = {value: key for key, value in list(mapping.items())}
#Use the `rates` dictionary to fill a sparse dok matrix.
D = dok_matrix((self.size,self.size))
D.update(rates)
return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
def directInitialMatrix(self):
"""
We generate an initial sparse matrix with all the transition rates (or probabilities).
We later transform this matrix into a rate or probability matrix depending on the preferred method of obtaining pi.
"""
#First initialize state codes and the mapping with states.
self.setStateCodes()
#For each state, calculate the indices of reached states and rates using the transition function.
results = imap(self.transitionStates, self.mapping.values())
#Simpler alternative that uses less memory.
#Would be competitive if the conversion from dok to csr is faster.
# D = dok_matrix((self.size,self.size),dtype=float)
# for index,(col,rate) in enumerate(results):
# D.update({(index,c): r for c,r in zip(col,rate)})
# return D.tocsr()
#preallocate memory for the rows, cols and rates of the sparse matrix
rows = np.empty(self.size,dtype=int)
cols = np.empty(self.size,dtype=int)
rates = np.empty(self.size,dtype=float)
#now fill the arrays with the results, increasing their size if current memory is too small.
right = 0
for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
left = right
right += len(col)
if right >= len(cols):
new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
cols.resize(new_capacity)
rates.resize(new_capacity)
rows.resize(new_capacity)
rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
cols[left:right] = col
rates[left:right] = rate
#Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag
def convertToProbabilityMatrix(self, Q):
"""
Converts the initial matrix to a probability matrix
We calculate P = I + Q/l, with l the largest diagonal element.
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
"""
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l)
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
def getTransitionMatrix(self,probabilities=True):
"""
If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix.
"""
if self.P is not None:
if isspmatrix(self.P):
if not isspmatrix_csr(self.P):
self.P = self.P.tocsr()
else:
assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
self.P = csr_matrix(self.P)
elif self.direct == True:
self.P = self.directInitialMatrix()
else:
self.P = self.indirectInitialMatrix(self.initialState)
if probabilities:
P = self.convertToProbabilityMatrix(self.P)
else:
P = self.convertToRateMatrix(self.P)
return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
#Gets the transitionmatrix and assert that it consists of a single irreducible class.
P = self.getTransitionMatrix(probabilities=True)
self.assertSingleClass(P)
return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
"""
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi
def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
The result is stored in the class attribute ``pi``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.eigenMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
Here we let the initial ``pi`` be a vector of ones.
For large state spaces, this method may not work well.
At the moment, we call :func:`powerMethod` if the number of states is 2.
Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
"""
Q = self.getIrreducibleTransitionMatrix(probabilities=False)
if Q.shape == (1, 1):
self.pi = np.array([1.0])
return
if Q.shape == (2, 2):
self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
return
size = Q.shape[0]
guess = np.ones(size,dtype=float)
w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
pi = v[:, 0].real
pi /= pi.sum()
self.pi = pi
def linearMethod(self):
    """
    Determine ``pi`` exactly by solving the balance equations with
    :func:`spsolve`; the result is stored in ``self.pi``.

    The first balance equation is replaced by the normalization
    condition ``sum(pi) == 1`` to obtain a non-singular system.

    Remarks
    -------
    For large state spaces the sparse direct solver may run out of memory.
    Code due to http://stackoverflow.com/questions/21308848/
    """
    P = self.getIrreducibleTransitionMatrix()
    # A single state trivially has probability one.
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    dim = P.shape[0]
    balance = (P - eye(dim)).T
    # Stack the normalization row on top of the remaining balance equations.
    A = vstack([np.ones(dim), balance[1:, :]]).tocsr()
    b = np.zeros((dim,))
    b[0] = 1
    self.pi = spsolve(A, b)
def krylovMethod(self,tol=1e-8):
    """
    We obtain ``pi`` by using the :func:`gmres` solver for the system of linear equations.
    It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.

    Raises
    ------
    RuntimeError
        If gmres does not converge.

    Remarks
    -------
    For large state spaces, this method may not always give a solution.
    Code due to http://stackoverflow.com/questions/21308848/
    """
    P = self.getIrreducibleTransitionMatrix()
    #if P consists of one element, then set self.pi = 1.0
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    size = P.shape[0]
    dP = P - eye(size)
    #Replace the first equation by the normalizing condition.
    A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
    rhs = np.zeros((size,))
    rhs[0] = 1
    # SciPy >= 1.14 removed gmres' `tol` keyword in favour of `rtol`;
    # try the new name first and fall back for older SciPy versions.
    try:
        pi, info = gmres(A, rhs, rtol=tol)
    except TypeError:
        pi, info = gmres(A, rhs, tol=tol)
    if info != 0:
        raise RuntimeError("gmres did not converge")
    self.pi = pi
def computePi(self,method='power'):
    """
    Compute the steady-state distribution with the chosen method and store
    it in the attribute ``pi``.

    Parameters
    ----------
    method : string, optional(default='power')
        One of 'power', 'eigen', 'linear' or 'krylov'; 'power' is the most
        robust choice.

    See Also
    --------
    :func:`powerMethod`, :func:`eigenMethod`, :func:`linearMethod`,
    :func:`krylovMethod`
    """
    methodSet = ['power','eigen','linear','krylov']
    assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
    # Dispatch to the corresponding *Method implementation on this instance.
    solver = getattr(self, method + 'Method')
    return solver()
def printPi(self):
    """
    Print every state together with its steady-state probability.

    Only usable after the direct/indirect method filled ``self.mapping``;
    not recommended for large state spaces.
    """
    assert self.pi is not None, "Calculate pi before calling printPi()"
    assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
    for index, state in self.mapping.items():
        print(state, self.pi[index])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.checkTransitionType | python | def checkTransitionType(self,state):
"""
Check whether the transition function returns output of the correct types.
This can be either a dictionary with as keys ints/tuples and values floats.
Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates.
"""
test = self.transition(state)
assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
if isinstance(test,dict):
assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
usesNumpy=False
if isinstance(test,tuple):
assert len(test)==2, "The transition function should return two variables: states and rates."
states,rates = test
assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
usesNumpy = True
return usesNumpy | Check whether the transition function returns output of the correct types.
This can be either a dictionary with as keys ints/tuples and values floats.
Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L157-L178 | [
"def transition(self, state):\n \"\"\"\n To be provided by the subclass. \n Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.\n For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the tran... | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
    """
    Store the optional transition matrix and configuration flags.

    Parameters
    ----------
    P : array_like or sparse matrix, optional
        Transition/rate matrix; when omitted it is derived later via the
        direct or indirect method.
    direct : bool, optional(default=False)
        Use the direct method (True) or the indirect method (False) when
        ``P`` must be derived.
    """
    self.P = P
    self.direct = direct
    self.pi = None #steady state probability vector
    self.mapping = {} #mapping used to identify states
    self.initialState = None #a dummy initial state
@property
def size(self):
    """
    Return the number of states in the state space, if ``self.mapping`` is defined.

    Note: this is ``len(self.mapping)``, so it is only meaningful after the
    direct or indirect method has filled the mapping.
    """
    return len(self.mapping)
def statespace(self):
    """
    To be provided by the subclass. Return the state space
    in an integer 2d numpy array with a state on each row.

    Raises
    ------
    NotImplementedError
        Always, unless overridden in a subclass.
    """
    raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
    """
    To be provided by the subclass.
    Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
    For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
    Ensure that unique states are returned.

    Raises
    ------
    NotImplementedError
        Always, unless overridden in a subclass.
    """
    raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
    """
    Validate the initial state and normalize its type.

    The state may be an ``int``, ``list``, ``tuple``, ``set`` or a 1d
    integer numpy array. Length-1 sequences collapse to an ``int``; longer
    ones become a ``tuple``. A ``set`` (of ints/tuples) is returned as-is.

    Parameters
    ----------
    initialState : int, list, tuple, set or np.ndarray
        The candidate initial state(s).

    Returns
    -------
    int, tuple or set
        The normalized state representation.
    """
    assert initialState is not None, "Initial state has not been specified."
    assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
    if isinstance(initialState,list):
        #Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
        assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
        # Bug fix: int() on a length-1 list raises TypeError; unwrap the element.
        initialState = int(initialState[0]) if len(initialState)==1 else tuple(initialState)
    elif isinstance(initialState,tuple):
        #Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
        assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
        if len(initialState)==1:
            # Bug fix: int() on a tuple raises TypeError; unwrap the element.
            initialState = int(initialState[0])
    elif isinstance(initialState,np.ndarray):
        #Check whether the state is a 1d numpy array. Return an int if it has length 1.
        assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
        initialState = int(initialState[0]) if len(initialState)==1 else tuple(initialState)
    elif isinstance(initialState,set):
        #If we have a set, then check whether all elements are ints or tuples.
        for state in initialState:
            assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
            if isinstance(state,tuple):
                assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
    return initialState
def checkTransitionType(self,state):
    """
    Check whether the transition function returns output of the correct types.
    This can be either a dictionary with as keys ints/tuples and values floats.
    Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates.

    Returns
    -------
    bool
        True when the transition function returns numpy arrays, False for a dict.
    """
    # Call the (subclass-provided) transition function once to inspect its output.
    test = self.transition(state)
    assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
    if isinstance(test,dict):
        assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
        assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
        usesNumpy=False
    if isinstance(test,tuple):
        assert len(test)==2, "The transition function should return two variables: states and rates."
        states,rates = test
        assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
        assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
        usesNumpy = True
    return usesNumpy
def convertToTransitionDict(self,transitions):
    """
    Convert numpy-style transition output ``(states, rates)`` into a dict
    that maps each state (int or tuple) to its accumulated rate.
    """
    newstates, newrates = transitions
    accumulated = defaultdict(float)
    single_dim = (newstates.shape[1] == 1)
    for row, rate in zip(newstates, newrates):
        # One-dimensional states become ints, longer states become tuples.
        key = int(row) if single_dim else tuple(row)
        accumulated[key] += rate
    return accumulated
def indirectInitialMatrix(self, initialState):
    """
    Given some initial state, this iteratively determines new states.
    We repeatedly call the transition function on unvisited states in the frontier set.
    Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.

    Parameters
    ----------
    initialState : int, tuple, list, set or np.ndarray
        State (or set of states) from which the exploration starts.

    Returns
    -------
    scipy.sparse.csr_matrix
        Matrix with the raw transition rates between all reachable states.
    """
    mapping = {}
    rates = OrderedDict()
    #Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
    convertedState = self.checkInitialState(initialState)
    if isinstance(convertedState,set):
        #If initialstates is a set, include all states in the set in the mapping.
        frontier = set( convertedState )
        for idx,state in enumerate(convertedState):
            mapping[state] = idx
            if idx == 0: #Test the return type of the transition function (dict or numpy).
                usesNumpy = self.checkTransitionType(initialState)
    else:
        #Otherwise include only the single state.
        frontier = set( [convertedState] )
        usesNumpy = self.checkTransitionType(initialState)
        mapping[convertedState] = 0
    # Breadth-first exploration: pop a state, expand its transitions,
    # and queue any state that has not been assigned an index yet.
    while len(frontier) > 0:
        fromstate = frontier.pop()
        fromindex = mapping[fromstate]
        if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
            transitions = self.transition(np.array(fromstate))
            transitions = self.convertToTransitionDict(transitions)
        else:
            transitions = self.transition(fromstate)
        for tostate,rate in transitions.items():
            if tostate not in mapping:
                frontier.add(tostate)
                mapping[tostate] = len(mapping)
            toindex = mapping[tostate]
            rates[(fromindex, toindex)] = rate
    #Inverse the keys and values in mapping to get a dictionary with indices and states.
    self.mapping = {value: key for key, value in list(mapping.items())}
    #Use the `rates` dictionary to fill a sparse dok matrix.
    D = dok_matrix((self.size,self.size))
    D.update(rates)
    return D.tocsr()
def getStateCode(self,state):
    """
    Calculates the state code for a specific state or set of states.
    We transform the states so that they are nonnegative and take an inner product.
    The resulting number is unique because we use numeral system with a large enough base.
    """
    # Shift by the elementwise minima so entries are nonnegative, then
    # interpret the state vector as digits via the precomputed base powers.
    return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
    """
    Generate (sorted) integer codes for the states in the state space.

    The codes allow quick identification (by binary search) of the states
    reached after a transition. Also fills ``self.mapping`` with
    index -> state for the sorted state space.

    Raises
    ------
    RuntimeError
        If the computed codes are not unique, which would make lookups unreliable.
    """
    #calculate the statespace and determine the minima and maxima each element in the state vector
    statespace = self.statespace()
    self.minvalues = np.amin(statespace,axis=0)
    self.maxvalues = np.amax(statespace,axis=0)
    #calculate the largest number of values and create a state code
    statesize = statespace.shape[1]
    largestRange = 1+np.max(self.maxvalues-self.minvalues)
    self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
    #Calculate the codes, sort them, and store them in self.codes
    codes = self.getStateCode(statespace)
    sorted_indices = np.argsort(codes)
    self.codes = codes[sorted_indices]
    if np.unique(self.codes).shape != self.codes.shape:
        # Bug fix: raising a bare string is a TypeError in Python 3;
        # raise a proper exception instead.
        raise RuntimeError("Non-unique coding of states, results are unreliable")
    #For the end results, it is useful to put the indices and corresponding states in a dictionary
    mapping = OrderedDict()
    for index,state in enumerate(statespace[sorted_indices]):
        mapping[index] = state
    self.mapping = mapping
def getStateIndex(self,state):
    """
    Returns the index of a state by calculating the state code and searching for this code in a sorted list.
    Can be called on multiple states at once.
    """
    statecodes = self.getStateCode(state)
    # self.codes is sorted (see setStateCodes), so binary search gives the index.
    return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
    """
    Return the indices of new states and their rates.

    Calls the (subclass-provided) transition function and translates the
    returned states into indices in the sorted state space.
    """
    newstates,rates = self.transition(state)
    newindices = self.getStateIndex(newstates)
    return newindices,rates
def directInitialMatrix(self):
    """
    We generate an initial sparse matrix with all the transition rates (or probabilities).
    We later transform this matrix into a rate or probability matrix depending on the preferred method of obtaining pi.

    Returns
    -------
    scipy.sparse.csr_matrix
        Matrix of raw rates between all states of the full state space.
    """
    #First initialize state codes and the mapping with states.
    self.setStateCodes()
    #For each state, calculate the indices of reached states and rates using the transition function.
    #NOTE(review): `imap` is presumably a py2/py3 compatibility alias (itertools.imap or builtin map) defined at module level — confirm at the top of the file.
    results = imap(self.transitionStates, self.mapping.values())
    #Simpler alternative that uses less memory.
    #Would be competitive if the conversion from dok to csr is faster.
    # D = dok_matrix((self.size,self.size),dtype=float)
    # for index,(col,rate) in enumerate(results):
    #     D.update({(index,c): r for c,r in zip(col,rate)})
    # return D.tocsr()
    #preallocate memory for the rows, cols and rates of the sparse matrix
    rows = np.empty(self.size,dtype=int)
    cols = np.empty(self.size,dtype=int)
    rates = np.empty(self.size,dtype=float)
    #now fill the arrays with the results, increasing their size if current memory is too small.
    right = 0
    for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
        left = right
        right += len(col)
        if right >= len(cols):
            # Grow all three buffers geometrically when they run out of room.
            new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
            cols.resize(new_capacity)
            rates.resize(new_capacity)
            rows.resize(new_capacity)
        rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
        cols[left:right] = col
        rates[left:right] = rate
    #Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
    return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
    """
    Turn the initial matrix into a proper rate matrix.

    Every row is made to sum to zero by placing the row sums on the
    diagonal and subtracting that diagonal from ``Q``.
    """
    totals = Q.sum(axis=1).getA1()
    positions = np.arange(Q.shape[0])
    diagonal = coo_matrix((totals, (positions, positions)), shape=Q.shape).tocsr()
    return Q - diagonal
def convertToProbabilityMatrix(self, Q):
    """
    Convert the initial matrix into a probability matrix.

    We compute P = I + Q/l with l slightly above the largest row sum
    (factor 1.00001), so every row sums to one. Even if Q is already a
    probability matrix this helps numerical stability, and the small extra
    diagonal mass prevents periodicity.
    """
    totals = Q.sum(axis=1).getA1()
    uniformization = np.max(totals) * 1.00001
    diag_values = 1. - totals / uniformization
    positions = np.arange(Q.shape[0])
    diagonal = coo_matrix((diag_values, (positions, positions)), shape=Q.shape).tocsr()
    return diagonal + Q.multiply(1. / uniformization)
def assertSingleClass(self,P):
    """
    Check whether the rate/probability matrix consists of a single connected class.
    If this is not the case, the steady state distribution is not well defined.

    Raises an AssertionError when multiple (weakly) connected components are found.
    """
    # csgraph operates directly on the sparse matrix; weak connectivity is used.
    components, _ = csgraph.connected_components(P, directed=True, connection='weak')
    assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
def getTransitionMatrix(self,probabilities=True):
    """
    If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
    Otherwise, we will generate it using the direct or indirect method.
    Since most solution methods use a probability matrix, this is the default setting.
    By setting probabilities=False we can also return a rate matrix.

    Parameters
    ----------
    probabilities : bool, optional(default=True)
        Return a probability matrix when True, a rate matrix otherwise.

    Returns
    -------
    scipy.sparse.csr_matrix
    """
    if self.P is not None:
        # User-specified matrix: ensure it ends up in csr format.
        if isspmatrix(self.P):
            if not isspmatrix_csr(self.P):
                self.P = self.P.tocsr()
        else:
            assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
            self.P = csr_matrix(self.P)
    elif self.direct == True:
        # Derive P over the full state space (requires statespace()).
        self.P = self.directInitialMatrix()
    else:
        # Derive P by exploration from self.initialState.
        self.P = self.indirectInitialMatrix(self.initialState)
    if probabilities:
        P = self.convertToProbabilityMatrix(self.P)
    else:
        P = self.convertToRateMatrix(self.P)
    return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
    """
    Return the transition (or rate) matrix and assert that the chain
    consists of a single communicating class.

    Parameters
    ----------
    probabilities : bool, optional(default=True)
        Return a probability matrix when True, a rate matrix otherwise.
    """
    # Bug fix: the argument used to be ignored (probabilities=True was
    # hard-coded), so callers requesting a rate matrix (e.g. eigenMethod)
    # silently received a probability matrix.
    P = self.getTransitionMatrix(probabilities=probabilities)
    self.assertSingleClass(P)
    return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
    """
    Approximate ``pi`` by repeated multiplication with the transition matrix
    (the power method) and store the result in ``self.pi``.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Convergence tolerance on the 1-norm difference between iterates.
    maxiter : int, optional(default=1e5)
        Upper bound on the number of iterations.

    Remarks
    -------
    Robust even for very large state spaces, but convergence may be slow for
    chains whose states are weakly connected.
    """
    Pt = self.getIrreducibleTransitionMatrix().T  # transpose once so each step is one dot product
    dim = Pt.shape[0]
    current = np.zeros(dim)
    current[0] = 1
    previous = np.zeros(dim)
    steps = 0
    # Two multiplications per iteration; stop once consecutive iterates agree.
    while norm(current - previous, 1) > tol and steps < maxiter:
        previous = Pt.dot(current)
        current = Pt.dot(previous)
        steps += 1
    self.pi = current
def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
    """
    Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
    The result is stored in the class attribute ``pi``.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
    maxiter : int, optional(default=1e5)
        The maximum number of iterations to be carried out.

    Example
    -------
    >>> P = np.array([[0.5,0.5],[0.6,0.4]])
    >>> mc = markovChain(P)
    >>> mc.eigenMethod()
    >>> print(mc.pi)
    [ 0.54545455  0.45454545]

    Remarks
    -------
    The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
    Here we let the initial ``pi`` be a vector of ones.
    For large state spaces, this method may not work well.
    Chains with one or two states are handled by explicit formulas rather than by :func:`eigs`.
    Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
    """
    # Request the rate matrix Q (probabilities=False): pi solves pi.Q = 0,
    # i.e. pi is the eigenvector of Q.T for the eigenvalue closest to zero.
    Q = self.getIrreducibleTransitionMatrix(probabilities=False)
    if Q.shape == (1, 1):
        # Single state: the stationary distribution is trivially [1].
        self.pi = np.array([1.0])
        return
    if Q.shape == (2, 2):
        # Closed-form stationary distribution of a two-state chain.
        self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
        return
    size = Q.shape[0]
    guess = np.ones(size,dtype=float)
    # Shift-invert around sigma=1e-6 targets the eigenvalue nearest zero.
    w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
    pi = v[:, 0].real
    # eigs returns an arbitrarily scaled eigenvector; normalize to sum 1.
    pi /= pi.sum()
    self.pi = pi
def linearMethod(self):
    """
    Determine ``pi`` exactly by solving the balance equations with
    :func:`spsolve`; the result is stored in ``self.pi``.

    The first balance equation is replaced by the normalization
    condition ``sum(pi) == 1`` to obtain a non-singular system.

    Remarks
    -------
    For large state spaces the sparse direct solver may run out of memory.
    Code due to http://stackoverflow.com/questions/21308848/
    """
    P = self.getIrreducibleTransitionMatrix()
    # A single state trivially has probability one.
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    dim = P.shape[0]
    balance = (P - eye(dim)).T
    # Stack the normalization row on top of the remaining balance equations.
    A = vstack([np.ones(dim), balance[1:, :]]).tocsr()
    b = np.zeros((dim,))
    b[0] = 1
    self.pi = spsolve(A, b)
def krylovMethod(self,tol=1e-8):
    """
    We obtain ``pi`` by using the :func:`gmres` solver for the system of linear equations.
    It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.

    Raises
    ------
    RuntimeError
        If gmres does not converge.

    Remarks
    -------
    For large state spaces, this method may not always give a solution.
    Code due to http://stackoverflow.com/questions/21308848/
    """
    P = self.getIrreducibleTransitionMatrix()
    #if P consists of one element, then set self.pi = 1.0
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    size = P.shape[0]
    dP = P - eye(size)
    #Replace the first equation by the normalizing condition.
    A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
    rhs = np.zeros((size,))
    rhs[0] = 1
    # SciPy >= 1.14 removed gmres' `tol` keyword in favour of `rtol`;
    # try the new name first and fall back for older SciPy versions.
    try:
        pi, info = gmres(A, rhs, rtol=tol)
    except TypeError:
        pi, info = gmres(A, rhs, tol=tol)
    if info != 0:
        raise RuntimeError("gmres did not converge")
    self.pi = pi
def computePi(self,method='power'):
    """
    Compute the steady-state distribution with the chosen method and store
    it in the attribute ``pi``.

    Parameters
    ----------
    method : string, optional(default='power')
        One of 'power', 'eigen', 'linear' or 'krylov'; 'power' is the most
        robust choice.

    See Also
    --------
    :func:`powerMethod`, :func:`eigenMethod`, :func:`linearMethod`,
    :func:`krylovMethod`
    """
    methodSet = ['power','eigen','linear','krylov']
    assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
    # Dispatch to the corresponding *Method implementation on this instance.
    solver = getattr(self, method + 'Method')
    return solver()
def printPi(self):
    """
    Print every state together with its steady-state probability.

    Only usable after the direct/indirect method filled ``self.mapping``;
    not recommended for large state spaces.
    """
    assert self.pi is not None, "Calculate pi before calling printPi()"
    assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
    for index, state in self.mapping.items():
        print(state, self.pi[index])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.convertToTransitionDict | python | def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict | If numpy is used, then this converts the output from transition() into a dict. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L180-L192 | null | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
    """
    Store the optional transition matrix and configuration flags.

    Parameters
    ----------
    P : array_like or sparse matrix, optional
        Transition/rate matrix; when omitted it is derived later via the
        direct or indirect method.
    direct : bool, optional(default=False)
        Use the direct method (True) or the indirect method (False) when
        ``P`` must be derived.
    """
    self.P = P
    self.direct = direct
    self.pi = None #steady state probability vector
    self.mapping = {} #mapping used to identify states
    self.initialState = None #a dummy initial state
@property
def size(self):
    """
    Return the number of states in the state space, if ``self.mapping`` is defined.

    Note: this is ``len(self.mapping)``, so it is only meaningful after the
    direct or indirect method has filled the mapping.
    """
    return len(self.mapping)
def statespace(self):
"""
To be provided by the subclass. Return the state space
in an integer 2d numpy array with a state on each row.
"""
raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
"""
To be provided by the subclass.
Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
Ensure that unique states are returned.
"""
raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
    def checkTransitionType(self,state):
        """
        Check whether the transition function returns output of the correct types.
        This can be either a dictionary with as keys ints/tuples and values floats,
        or a tuple consisting of a 2d integer numpy array with states and a 1d
        numpy array with rates.

        Returns
        -------
        usesNumpy : bool
            True when transition() returns the numpy (states, rates) pair,
            False when it returns a dict.
        """
        # Probe the user-supplied transition function once on a sample state.
        test = self.transition(state)
        assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"

        if isinstance(test,dict):
            assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
            assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
            usesNumpy=False

        if isinstance(test,tuple):
            assert len(test)==2, "The transition function should return two variables: states and rates."
            states,rates = test
            assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
            assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
            usesNumpy = True

        return usesNumpy
    def indirectInitialMatrix(self, initialState):
        """
        Build the initial sparse matrix by exploring the chain from ``initialState``.

        We repeatedly call the transition function on unvisited states in the
        frontier set. Each newly visited state is assigned an index in the
        'mapping' dictionary and the rates are stored in a dictionary, which
        finally fills a sparse matrix.

        Parameters
        ----------
        initialState : int, tuple, list, np.ndarray or set
            State(s) from which the exploration starts.

        Returns
        -------
        scipy.sparse.csr_matrix
            Matrix with the raw rates/probabilities between all reached states.
        """
        mapping = {}
        rates = OrderedDict()

        #Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
        convertedState = self.checkInitialState(initialState)

        if isinstance(convertedState,set):
            #If initialstates is a set, include all states in the set in the mapping.
            frontier = set( convertedState )
            for idx,state in enumerate(convertedState):
                mapping[state] = idx
                if idx == 0: #Test the return type of the transition function (dict or numpy).
                    usesNumpy = self.checkTransitionType(initialState)
        else:
            #Otherwise include only the single state.
            frontier = set( [convertedState] )
            usesNumpy = self.checkTransitionType(initialState)
            mapping[convertedState] = 0

        # Breadth-style exploration: pop an unvisited state, record all its
        # outgoing rates, and queue any newly discovered destination states.
        while len(frontier) > 0:
            fromstate = frontier.pop()
            fromindex = mapping[fromstate]

            if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
                transitions = self.transition(np.array(fromstate))
                transitions = self.convertToTransitionDict(transitions)
            else:
                transitions = self.transition(fromstate)

            for tostate,rate in transitions.items():
                if tostate not in mapping:
                    frontier.add(tostate)
                    mapping[tostate] = len(mapping)
                toindex = mapping[tostate]
                rates[(fromindex, toindex)] = rate

        #Inverse the keys and values in mapping to get a dictionary with indices and states.
        self.mapping = {value: key for key, value in list(mapping.items())}

        #Use the `rates` dictionary to fill a sparse dok matrix.
        D = dok_matrix((self.size,self.size))
        D.update(rates)
        return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
    def directInitialMatrix(self):
        """
        Generate the initial sparse matrix with all transition rates (or
        probabilities) using the direct method.

        Every state returned by statespace() is visited and its outgoing
        transitions are written into preallocated row/column/rate arrays,
        which are grown on demand. The matrix is later transformed into a
        rate or probability matrix depending on the preferred method of
        obtaining pi.

        Returns
        -------
        scipy.sparse.csr_matrix
        """
        #First initialize state codes and the mapping with states.
        self.setStateCodes()

        #For each state, calculate the indices of reached states and rates using the transition function.
        # NOTE(review): `imap` is presumably itertools.imap aliased to map for
        # Python 3 at the top of the file — TODO confirm the import.
        results = imap(self.transitionStates, self.mapping.values())

        #Simpler alternative that uses less memory.
        #Would be competitive if the conversion from dok to csr is faster.
#        D = dok_matrix((self.size,self.size),dtype=float)
#        for index,(col,rate) in enumerate(results):
#            D.update({(index,c): r for c,r in zip(col,rate)})
#        return D.tocsr()

        #preallocate memory for the rows, cols and rates of the sparse matrix
        rows = np.empty(self.size,dtype=int)
        cols = np.empty(self.size,dtype=int)
        rates = np.empty(self.size,dtype=float)

        #now fill the arrays with the results, increasing their size if current memory is too small.
        right = 0
        for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
            left = right
            right += len(col)
            if right >= len(cols):
                new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
                cols.resize(new_capacity)
                rates.resize(new_capacity)
                rows.resize(new_capacity)
            rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
            cols[left:right] = col
            rates[left:right] = rate

        #Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
        return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag
def convertToProbabilityMatrix(self, Q):
"""
Converts the initial matrix to a probability matrix
We calculate P = I + Q/l, with l the largest diagonal element.
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
"""
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l)
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
    def getTransitionMatrix(self,probabilities=True):
        """
        Return the transition matrix of the chain as a sparse csr matrix.

        If ``self.P`` has been given already, reuse it (converting to sparse
        csr format if needed). Otherwise generate it with the direct or
        indirect method. Since most solution methods use a probability matrix,
        that is the default; ``probabilities=False`` yields a rate matrix.

        Parameters
        ----------
        probabilities : bool, optional(default=True)
            Whether to return a probability matrix or a rate matrix.
        """
        if self.P is not None:
            # Reuse the user-supplied matrix; normalize its storage to csr.
            if isspmatrix(self.P):
                if not isspmatrix_csr(self.P):
                    self.P = self.P.tocsr()
            else:
                assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
                self.P = csr_matrix(self.P)

        elif self.direct == True:
            self.P = self.directInitialMatrix()

        else:
            self.P = self.indirectInitialMatrix(self.initialState)

        # self.P keeps the raw matrix; only the returned copy is converted.
        if probabilities:
            P = self.convertToProbabilityMatrix(self.P)
        else:
            P = self.convertToRateMatrix(self.P)

        return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
#Gets the transitionmatrix and assert that it consists of a single irreducible class.
P = self.getTransitionMatrix(probabilities=True)
self.assertSingleClass(P)
return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
"""
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi
    def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
        """
        Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
        The result is stored in the class attribute ``pi``.

        Parameters
        ----------
        tol : float, optional(default=1e-8)
            Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
        maxiter : int, optional(default=1e5)
            The maximum number of iterations to be carried out.

        Example
        -------
        >>> P = np.array([[0.5,0.5],[0.6,0.4]])
        >>> mc = markovChain(P)
        >>> mc.eigenMethod()
        >>> print(mc.pi)
        [ 0.54545455  0.45454545]

        Remarks
        -------
        The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
        Here we let the initial ``pi`` be a vector of ones.
        For large state spaces, this method may not work well.
        Small chains (1 or 2 states) are handled in closed form instead.
        Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
        """
        # NOTE(review): this expects a RATE matrix (stationary eigenvalue 0,
        # found via sigma=1e-6) — confirm getIrreducibleTransitionMatrix
        # actually forwards probabilities=False.
        Q = self.getIrreducibleTransitionMatrix(probabilities=False)

        # Trivial single-state chain.
        if Q.shape == (1, 1):
            self.pi = np.array([1.0])
            return

        # Two-state chain: closed-form balance solution from the rates.
        if Q.shape == (2, 2):
            self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
            return

        size = Q.shape[0]
        guess = np.ones(size,dtype=float)
        # Shift-invert around sigma=1e-6 targets the eigenvalue closest to 0.
        w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)

        # The eigenvector is complex-typed; take the real part and normalize.
        pi = v[:, 0].real
        pi /= pi.sum()
        self.pi = pi
def linearMethod(self):
"""
Determines ``pi`` by solving a system of linear equations using :func:`spsolve`.
The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.linearMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
For large state spaces, the linear algebra solver may not work well due to memory overflow.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
self.pi = spsolve(A, rhs)
def krylovMethod(self,tol=1e-8):
"""
We obtain ``pi`` by using the :func:``gmres`` solver for the system of linear equations.
It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.krylovMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
Remarks
-------
For large state spaces, this method may not always give a solution.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
pi, info = gmres(A, rhs, tol=tol)
if info != 0:
raise RuntimeError("gmres did not converge")
self.pi = pi
def computePi(self,method='power'):
"""
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
"""
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)()
def printPi(self):
"""
Prints all states state and their steady state probabilities.
Not recommended for large state spaces.
"""
assert self.pi is not None, "Calculate pi before calling printPi()"
assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
for key,state in self.mapping.items():
print(state,self.pi[key])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.indirectInitialMatrix | python | def indirectInitialMatrix(self, initialState):
mapping = {}
rates = OrderedDict()
#Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
convertedState = self.checkInitialState(initialState)
if isinstance(convertedState,set):
#If initialstates is a set, include all states in the set in the mapping.
frontier = set( convertedState )
for idx,state in enumerate(convertedState):
mapping[state] = idx
if idx == 0: #Test the return type of the transition function (dict or numpy).
usesNumpy = self.checkTransitionType(initialState)
else:
#Otherwise include only the single state.
frontier = set( [convertedState] )
usesNumpy = self.checkTransitionType(initialState)
mapping[convertedState] = 0
while len(frontier) > 0:
fromstate = frontier.pop()
fromindex = mapping[fromstate]
if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
transitions = self.transition(np.array(fromstate))
transitions = self.convertToTransitionDict(transitions)
else:
transitions = self.transition(fromstate)
for tostate,rate in transitions.items():
if tostate not in mapping:
frontier.add(tostate)
mapping[tostate] = len(mapping)
toindex = mapping[tostate]
rates[(fromindex, toindex)] = rate
#Inverse the keys and values in mapping to get a dictionary with indices and states.
self.mapping = {value: key for key, value in list(mapping.items())}
#Use the `rates` dictionary to fill a sparse dok matrix.
D = dok_matrix((self.size,self.size))
D.update(rates)
return D.tocsr() | Given some initial state, this iteratively determines new states.
We repeatedly call the transition function on unvisited states in the frontier set.
Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L194-L243 | [
"def transition(self, state):\n \"\"\"\n To be provided by the subclass. \n Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.\n For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the tran... | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
    Should be provided in the subclass by the user when using the indirect/direct method.
statespace()
    Returns the state space of the Markov chain. Should be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
self.P = P
self.direct = direct
self.pi = None #steady state probability vector
self.mapping = {} #mapping used to identify states
self.initialState = None #a dummy initial state
@property
def size(self):
"""
Return the number of states in the state space, if ``self.mapping`` is defined.
"""
return len(self.mapping)
def statespace(self):
"""
To be provided by the subclass. Return the state space
in an integer 2d numpy array with a state on each row.
"""
raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
"""
To be provided by the subclass.
Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
Ensure that unique states are returned.
"""
raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
    def checkTransitionType(self,state):
        """
        Check whether the transition function returns output of the correct types.
        This can be either a dictionary with as keys ints/tuples and values floats,
        or a tuple consisting of a 2d integer numpy array with states and a 1d
        numpy array with rates.

        Returns
        -------
        usesNumpy : bool
            True when transition() returns the numpy (states, rates) pair,
            False when it returns a dict.
        """
        # Probe the user-supplied transition function once on a sample state.
        test = self.transition(state)
        assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"

        if isinstance(test,dict):
            assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
            assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
            usesNumpy=False

        if isinstance(test,tuple):
            assert len(test)==2, "The transition function should return two variables: states and rates."
            states,rates = test
            assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
            assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
            usesNumpy = True

        return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
    def indirectInitialMatrix(self, initialState):
        """
        Build the initial sparse matrix by exploring the chain from ``initialState``.

        We repeatedly call the transition function on unvisited states in the
        frontier set. Each newly visited state is assigned an index in the
        'mapping' dictionary and the rates are stored in a dictionary, which
        finally fills a sparse matrix.

        Parameters
        ----------
        initialState : int, tuple, list, np.ndarray or set
            State(s) from which the exploration starts.

        Returns
        -------
        scipy.sparse.csr_matrix
            Matrix with the raw rates/probabilities between all reached states.
        """
        mapping = {}
        rates = OrderedDict()

        #Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
        convertedState = self.checkInitialState(initialState)

        if isinstance(convertedState,set):
            #If initialstates is a set, include all states in the set in the mapping.
            frontier = set( convertedState )
            for idx,state in enumerate(convertedState):
                mapping[state] = idx
                if idx == 0: #Test the return type of the transition function (dict or numpy).
                    usesNumpy = self.checkTransitionType(initialState)
        else:
            #Otherwise include only the single state.
            frontier = set( [convertedState] )
            usesNumpy = self.checkTransitionType(initialState)
            mapping[convertedState] = 0

        # Breadth-style exploration: pop an unvisited state, record all its
        # outgoing rates, and queue any newly discovered destination states.
        while len(frontier) > 0:
            fromstate = frontier.pop()
            fromindex = mapping[fromstate]

            if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
                transitions = self.transition(np.array(fromstate))
                transitions = self.convertToTransitionDict(transitions)
            else:
                transitions = self.transition(fromstate)

            for tostate,rate in transitions.items():
                if tostate not in mapping:
                    frontier.add(tostate)
                    mapping[tostate] = len(mapping)
                toindex = mapping[tostate]
                rates[(fromindex, toindex)] = rate

        #Inverse the keys and values in mapping to get a dictionary with indices and states.
        self.mapping = {value: key for key, value in list(mapping.items())}

        #Use the `rates` dictionary to fill a sparse dok matrix.
        D = dok_matrix((self.size,self.size))
        D.update(rates)
        return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
    def directInitialMatrix(self):
        """
        Generate the initial sparse matrix with all transition rates (or
        probabilities) using the direct method.

        Every state returned by statespace() is visited and its outgoing
        transitions are written into preallocated row/column/rate arrays,
        which are grown on demand. The matrix is later transformed into a
        rate or probability matrix depending on the preferred method of
        obtaining pi.

        Returns
        -------
        scipy.sparse.csr_matrix
        """
        #First initialize state codes and the mapping with states.
        self.setStateCodes()

        #For each state, calculate the indices of reached states and rates using the transition function.
        # NOTE(review): `imap` is presumably itertools.imap aliased to map for
        # Python 3 at the top of the file — TODO confirm the import.
        results = imap(self.transitionStates, self.mapping.values())

        #Simpler alternative that uses less memory.
        #Would be competitive if the conversion from dok to csr is faster.
#        D = dok_matrix((self.size,self.size),dtype=float)
#        for index,(col,rate) in enumerate(results):
#            D.update({(index,c): r for c,r in zip(col,rate)})
#        return D.tocsr()

        #preallocate memory for the rows, cols and rates of the sparse matrix
        rows = np.empty(self.size,dtype=int)
        cols = np.empty(self.size,dtype=int)
        rates = np.empty(self.size,dtype=float)

        #now fill the arrays with the results, increasing their size if current memory is too small.
        right = 0
        for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
            left = right
            right += len(col)
            if right >= len(cols):
                new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
                cols.resize(new_capacity)
                rates.resize(new_capacity)
                rows.resize(new_capacity)
            rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
            cols[left:right] = col
            rates[left:right] = rate

        #Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
        return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
    """
    Turn the initial matrix into a generator (rate) matrix.

    Every row is made to sum to zero by placing the negated row sums on
    the diagonal.
    """
    totals = Q.sum(axis=1).getA1()
    indices = np.arange(Q.shape[0])
    diagonal = coo_matrix((totals, (indices, indices)), shape=Q.shape).tocsr()
    return Q - diagonal
def convertToProbabilityMatrix(self, Q):
    """
    Turn the initial matrix into a (strictly aperiodic) probability matrix.

    Computes ``P = I + Q/l`` where ``l`` slightly exceeds the largest row
    sum. The small extra mass on the diagonal (factor 1.00001) prevents
    periodicity and improves numerical stability, even when ``Q`` already
    is a probability matrix.
    """
    totals = Q.sum(axis=1).getA1()
    uniformization = np.max(totals) * 1.00001
    selfLoops = 1. - totals / uniformization
    indices = np.arange(Q.shape[0])
    diagonal = coo_matrix((selfLoops, (indices, indices)), shape=Q.shape).tocsr()
    return diagonal + Q.multiply(1. / uniformization)
def assertSingleClass(self, P):
    """
    Verify that the chain has exactly one (weakly) communicating class.

    The steady-state distribution is well defined only in that case; an
    ``AssertionError`` is raised otherwise.
    """
    numClasses, _ = csgraph.connected_components(P, directed=True, connection='weak')
    assert numClasses == 1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." % numClasses
def getTransitionMatrix(self,probabilities=True):
"""
If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix.
"""
if self.P is not None:
if isspmatrix(self.P):
if not isspmatrix_csr(self.P):
self.P = self.P.tocsr()
else:
assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
self.P = csr_matrix(self.P)
elif self.direct == True:
self.P = self.directInitialMatrix()
else:
self.P = self.indirectInitialMatrix(self.initialState)
if probabilities:
P = self.convertToProbabilityMatrix(self.P)
else:
P = self.convertToRateMatrix(self.P)
return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
#Gets the transitionmatrix and assert that it consists of a single irreducible class.
P = self.getTransitionMatrix(probabilities=True)
self.assertSingleClass(P)
return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
"""
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi
def eigenMethod(self, tol=1e-8, maxiter=1e5):
    """
    Determine ``pi`` as the eigenvector of the rate matrix belonging to
    the eigenvalue closest to zero, using :func:`eigs`, and store the
    result in the class attribute ``pi``.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result.
    maxiter : int, optional(default=1e5)
        The maximum number of iterations to be carried out.

    Example
    -------
    >>> P = np.array([[0.5,0.5],[0.6,0.4]])
    >>> mc = markovChain(P)
    >>> mc.eigenMethod()
    >>> print(mc.pi)
    [ 0.54545455  0.45454545]

    Remarks
    -------
    The speed of convergence depends heavily on the initial guess for
    ``pi`` (a vector of ones here). For large state spaces this method may
    not work well. 1x1 and 2x2 chains are handled in closed form.
    Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
    """
    # NOTE(review): confirm that getIrreducibleTransitionMatrix forwards
    # `probabilities`; a hardcoded probabilities=True there would make Q a
    # probability matrix rather than the rate matrix requested here.
    Q = self.getIrreducibleTransitionMatrix(probabilities=False)
    if Q.shape == (1, 1):
        # Single state: the stationary distribution is trivial.
        self.pi = np.array([1.0])
        return
    if Q.shape == (2, 2):
        # Two states: closed-form solution from the off-diagonal rates.
        self.pi = np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
        return
    size = Q.shape[0]
    guess = np.ones(size, dtype=float)
    # Shift-invert around sigma=1e-6 targets the eigenvalue closest to zero.
    w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM', tol=tol, maxiter=maxiter)
    pi = v[:, 0].real
    pi /= pi.sum()  # eigs returns an arbitrarily scaled vector; normalize it.
    self.pi = pi
def linearMethod(self):
"""
Determines ``pi`` by solving a system of linear equations using :func:`spsolve`.
The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.linearMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
For large state spaces, the linear algebra solver may not work well due to memory overflow.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
self.pi = spsolve(A, rhs)
def krylovMethod(self, tol=1e-8):
    """
    Determine ``pi`` with the :func:`gmres` Krylov-subspace solver and
    store it in the class attribute ``pi``.

    The stationarity equations ``pi (P - I) = 0`` are combined with the
    normalization ``sum(pi) = 1`` (which replaces the first equation) and
    handed to ``gmres``.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result.

    Raises
    ------
    RuntimeError
        If ``gmres`` does not converge.

    Remarks
    -------
    For large state spaces, this method may not always give a solution.
    Code due to http://stackoverflow.com/questions/21308848/
    """
    P = self.getIrreducibleTransitionMatrix()
    # If P consists of one element, then set self.pi = 1.0.
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    size = P.shape[0]
    dP = P - eye(size)
    # Replace the first equation by the normalizing condition.
    A = vstack([np.ones(size), dP.T[1:, :]]).tocsr()
    rhs = np.zeros((size,))
    rhs[0] = 1
    # NOTE(review): the `tol` keyword of gmres is deprecated/renamed in
    # recent SciPy releases (`rtol`/`atol`) — confirm against the SciPy
    # version this project targets.
    pi, info = gmres(A, rhs, tol=tol)
    if info != 0:
        raise RuntimeError("gmres did not converge")
    self.pi = pi
def computePi(self,method='power'):
"""
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
"""
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)()
def printPi(self):
"""
Prints all states state and their steady state probabilities.
Not recommended for large state spaces.
"""
assert self.pi is not None, "Calculate pi before calling printPi()"
assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
for key,state in self.mapping.items():
print(state,self.pi[key])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.getStateCode | python | def getStateCode(self,state):
return np.dot(state-self.minvalues,self.statecode) | Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L246-L252 | null | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
self.P = P
self.direct = direct
self.pi = None #steady state probability vector
self.mapping = {} #mapping used to identify states
self.initialState = None #a dummy initial state
@property
def size(self):
"""
Return the number of states in the state space, if ``self.mapping`` is defined.
"""
return len(self.mapping)
def statespace(self):
"""
To be provided by the subclass. Return the state space
in an integer 2d numpy array with a state on each row.
"""
raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
"""
To be provided by the subclass.
Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
Ensure that unique states are returned.
"""
raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
def checkTransitionType(self,state):
"""
Check whether the transition function returns output of the correct types.
This can be either a dictionary with as keys ints/tuples and values floats.
Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates.
"""
test = self.transition(state)
assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
if isinstance(test,dict):
assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
usesNumpy=False
if isinstance(test,tuple):
assert len(test)==2, "The transition function should return two variables: states and rates."
states,rates = test
assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
usesNumpy = True
return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
def indirectInitialMatrix(self, initialState):
"""
Given some initial state, this iteratively determines new states.
We repeatedly call the transition function on unvisited states in the frontier set.
Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.
"""
mapping = {}
rates = OrderedDict()
#Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
convertedState = self.checkInitialState(initialState)
if isinstance(convertedState,set):
#If initialstates is a set, include all states in the set in the mapping.
frontier = set( convertedState )
for idx,state in enumerate(convertedState):
mapping[state] = idx
if idx == 0: #Test the return type of the transition function (dict or numpy).
usesNumpy = self.checkTransitionType(initialState)
else:
#Otherwise include only the single state.
frontier = set( [convertedState] )
usesNumpy = self.checkTransitionType(initialState)
mapping[convertedState] = 0
while len(frontier) > 0:
fromstate = frontier.pop()
fromindex = mapping[fromstate]
if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
transitions = self.transition(np.array(fromstate))
transitions = self.convertToTransitionDict(transitions)
else:
transitions = self.transition(fromstate)
for tostate,rate in transitions.items():
if tostate not in mapping:
frontier.add(tostate)
mapping[tostate] = len(mapping)
toindex = mapping[tostate]
rates[(fromindex, toindex)] = rate
#Inverse the keys and values in mapping to get a dictionary with indices and states.
self.mapping = {value: key for key, value in list(mapping.items())}
#Use the `rates` dictionary to fill a sparse dok matrix.
D = dok_matrix((self.size,self.size))
D.update(rates)
return D.tocsr()
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
def directInitialMatrix(self):
"""
We generate an initial sparse matrix with all the transition rates (or probabilities).
We later transform this matrix into a rate or probability matrix depending on the preferred method of obtaining pi.
"""
#First initialize state codes and the mapping with states.
self.setStateCodes()
#For each state, calculate the indices of reached states and rates using the transition function.
results = imap(self.transitionStates, self.mapping.values())
#Simpler alternative that uses less memory.
#Would be competitive if the conversion from dok to csr is faster.
# D = dok_matrix((self.size,self.size),dtype=float)
# for index,(col,rate) in enumerate(results):
# D.update({(index,c): r for c,r in zip(col,rate)})
# return D.tocsr()
#preallocate memory for the rows, cols and rates of the sparse matrix
rows = np.empty(self.size,dtype=int)
cols = np.empty(self.size,dtype=int)
rates = np.empty(self.size,dtype=float)
#now fill the arrays with the results, increasing their size if current memory is too small.
right = 0
for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
left = right
right += len(col)
if right >= len(cols):
new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
cols.resize(new_capacity)
rates.resize(new_capacity)
rows.resize(new_capacity)
rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
cols[left:right] = col
rates[left:right] = rate
#Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag
def convertToProbabilityMatrix(self, Q):
"""
Converts the initial matrix to a probability matrix
We calculate P = I + Q/l, with l the largest diagonal element.
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
"""
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l)
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
def getTransitionMatrix(self,probabilities=True):
"""
If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix.
"""
if self.P is not None:
if isspmatrix(self.P):
if not isspmatrix_csr(self.P):
self.P = self.P.tocsr()
else:
assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
self.P = csr_matrix(self.P)
elif self.direct == True:
self.P = self.directInitialMatrix()
else:
self.P = self.indirectInitialMatrix(self.initialState)
if probabilities:
P = self.convertToProbabilityMatrix(self.P)
else:
P = self.convertToRateMatrix(self.P)
return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
#Gets the transitionmatrix and assert that it consists of a single irreducible class.
P = self.getTransitionMatrix(probabilities=True)
self.assertSingleClass(P)
return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
"""
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi
def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
The result is stored in the class attribute ``pi``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.eigenMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
Here we let the initial ``pi`` be a vector of ones.
For large state spaces, this method may not work well.
At the moment, we call :func:`powerMethod` if the number of states is 2.
Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
"""
Q = self.getIrreducibleTransitionMatrix(probabilities=False)
if Q.shape == (1, 1):
self.pi = np.array([1.0])
return
if Q.shape == (2, 2):
self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
return
size = Q.shape[0]
guess = np.ones(size,dtype=float)
w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
pi = v[:, 0].real
pi /= pi.sum()
self.pi = pi
def linearMethod(self):
"""
Determines ``pi`` by solving a system of linear equations using :func:`spsolve`.
The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.linearMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
For large state spaces, the linear algebra solver may not work well due to memory overflow.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
self.pi = spsolve(A, rhs)
def krylovMethod(self,tol=1e-8):
"""
We obtain ``pi`` by using the :func:``gmres`` solver for the system of linear equations.
It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.krylovMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
Remarks
-------
For large state spaces, this method may not always give a solution.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
pi, info = gmres(A, rhs, tol=tol)
if info != 0:
raise RuntimeError("gmres did not converge")
self.pi = pi
def computePi(self,method='power'):
"""
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
"""
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)()
def printPi(self):
"""
Prints all states state and their steady state probabilities.
Not recommended for large state spaces.
"""
assert self.pi is not None, "Calculate pi before calling printPi()"
assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
for key,state in self.mapping.items():
print(state,self.pi[key])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.setStateCodes | python | def setStateCodes(self):
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping | Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L254-L282 | [
"def statespace(self):\n \"\"\"\n To be provided by the subclass. Return the state space\n in an integer 2d numpy array with a state on each row. \n \"\"\"\n raise NotImplementedError('Implement the function statespace() in the subclass') \n",
"def getStateCode(self,state):\n \"\"\" ... | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
    """Store the optional transition matrix and method preference; reset all results."""
    # Results / bookkeeping, filled in later by the solution methods.
    self.pi = None
    self.mapping = {}
    self.initialState = None
    # Configuration supplied by the caller.
    self.P = P
    self.direct = direct
@property
def size(self):
    """
    Return the number of states in the state space, if ``self.mapping`` is defined.
    """
    # Simply len(self.mapping); it is 0 until the direct/indirect method fills the mapping.
    return len(self.mapping)
def statespace(self):
    """
    To be provided by the subclass. Return the state space
    in an integer 2d numpy array with a state on each row.
    """
    # Abstract hook: the direct method calls this to enumerate all states.
    raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
    """
    To be provided by the subclass.
    Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
    For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
    Ensure that unique states are returned.
    """
    # Abstract hook: both the direct and indirect methods build P from this.
    raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
    """
    Validate the initial state and normalize its representation.

    Parameters
    ----------
    initialState : int, list, tuple, numpy.ndarray or set
        Starting state(s) for the indirect method; all elements must be ints.

    Returns
    -------
    int, tuple or set
        An int for a scalar (or length-1) state, a tuple for a vector state,
        or the original set when several starting states are given.

    Raises
    ------
    AssertionError
        If the state is missing, of the wrong type, or not integer-valued.
    """
    assert initialState is not None, "Initial state has not been specified."
    assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
    if isinstance(initialState,list):
        #Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
        assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
        # BUG FIX: int() cannot be applied to a list; take the single element instead.
        initialState = int(initialState[0]) if len(initialState)==1 else tuple(initialState)
    elif isinstance(initialState,tuple):
        #Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
        assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
        if len(initialState)==1:
            # BUG FIX: int() cannot be applied to a tuple; take the single element.
            initialState = int(initialState[0])
    elif isinstance(initialState,np.ndarray):
        #Check whether the state is a 1d numpy array. Return an int if it has length 1.
        assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
        # Index the single element explicitly: int() on a size-1 array is deprecated in NumPy.
        initialState = int(initialState[0]) if len(initialState)==1 else tuple(initialState)
    elif isinstance(initialState,set):
        #If we have a set, then check whether all elements are ints or tuples.
        for state in initialState:
            assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
            if isinstance(state,tuple):
                assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
    return initialState
def checkTransitionType(self,state):
    """
    Inspect what :func:`transition` returns for ``state``.

    Returns True when it returns numpy output (a 2d integer array of states
    plus a 1d array of rates), False when it returns a dict mapping int/tuple
    states to float rates. Raises AssertionError for anything else.
    """
    result = self.transition(state)
    assert isinstance(result,(dict,tuple)), "Transition function does not return a dict or tuple"
    if isinstance(result, tuple):
        # numpy form: (states, rates)
        assert len(result)==2, "The transition function should return two variables: states and rates."
        states,rates = result
        assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
        assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
        return True
    # dict form: {state: rate}
    assert all(isinstance(s, (int,tuple)) for s in result.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
    assert all(isinstance(r, float) for r in result.values()), "Transition function returns a dict, but the rates should be floats."
    return False
def convertToTransitionDict(self,transitions):
    """
    Convert numpy transition output (states, rates) into a dict.

    Scalar states (single column) become int keys, vector states become
    tuple keys; rates of duplicate states are summed.
    """
    states, rates = transitions
    scalar = states.shape[1] == 1
    out = defaultdict(float)
    for row, rate in zip(states, rates):
        key = int(row) if scalar else tuple(row)
        out[key] += rate
    return out
def indirectInitialMatrix(self, initialState):
    """
    Given some initial state, this iteratively determines new states.
    We repeatedly call the transition function on unvisited states in the frontier set.
    Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.

    Parameters
    ----------
    initialState : int, list, tuple, numpy.ndarray or set
        State(s) from which the exploration starts; validated by checkInitialState.

    Returns
    -------
    scipy.sparse.csr_matrix
        Sparse matrix with the raw transition rates between all reachable states.
    """
    mapping = {}
    rates = OrderedDict()
    #Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
    convertedState = self.checkInitialState(initialState)
    if isinstance(convertedState,set):
        #If initialstates is a set, include all states in the set in the mapping.
        frontier = set( convertedState )
        for idx,state in enumerate(convertedState):
            mapping[state] = idx
            if idx == 0: #Test the return type of the transition function (dict or numpy).
                usesNumpy = self.checkTransitionType(initialState)
    else:
        #Otherwise include only the single state.
        frontier = set( [convertedState] )
        usesNumpy = self.checkTransitionType(initialState)
        mapping[convertedState] = 0
    # Breadth-like exploration: pop an unvisited state, record the rates to
    # each successor, and enqueue successors not seen before.
    while len(frontier) > 0:
        fromstate = frontier.pop()
        fromindex = mapping[fromstate]
        if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
            transitions = self.transition(np.array(fromstate))
            transitions = self.convertToTransitionDict(transitions)
        else:
            transitions = self.transition(fromstate)
        for tostate,rate in transitions.items():
            if tostate not in mapping:
                frontier.add(tostate)
                mapping[tostate] = len(mapping)
            toindex = mapping[tostate]
            rates[(fromindex, toindex)] = rate
    #Inverse the keys and values in mapping to get a dictionary with indices and states.
    self.mapping = {value: key for key, value in list(mapping.items())}
    #Use the `rates` dictionary to fill a sparse dok matrix.
    D = dok_matrix((self.size,self.size))
    D.update(rates)
    return D.tocsr()
def getStateCode(self,state):
    """
    Map a state (or an array of states, one per row) to its unique integer code.

    States are shifted by ``minvalues`` to be nonnegative and then interpreted
    as digits in the numeral system set up by setStateCodes.
    """
    shifted = state - self.minvalues
    return np.dot(shifted, self.statecode)
def setStateCodes(self):
    """
    Generate sorted unique integer codes for all states in the state space.

    Sets ``minvalues``/``maxvalues`` (element-wise bounds of the state vectors),
    ``statecode`` (the digit weights used by getStateCode), ``codes`` (the
    sorted state codes, used by getStateIndex) and ``mapping`` (index -> state).

    Raises
    ------
    ValueError
        If two states map to the same code, which would make results unreliable.
    """
    #calculate the statespace and determine the minima and maxima each element in the state vector
    statespace = self.statespace()
    self.minvalues = np.amin(statespace,axis=0)
    self.maxvalues = np.amax(statespace,axis=0)
    # Use a numeral system whose base exceeds the largest coordinate range so
    # that each state receives a unique code.
    statesize = statespace.shape[1]
    largestRange = 1+np.max(self.maxvalues-self.minvalues)
    self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
    #Calculate the codes, sort them, and store them in self.codes
    codes = self.getStateCode(statespace)
    sorted_indices = np.argsort(codes)
    self.codes = codes[sorted_indices]
    if np.unique(self.codes).shape != self.codes.shape:
        # BUG FIX: `raise "..."` (a string) is a TypeError in Python 3;
        # raise a proper exception instead.
        raise ValueError("Non-unique coding of states, results are unreliable")
    #For the end results, it is useful to put the indices and corresponding states in a dictionary
    mapping = OrderedDict()
    for index,state in enumerate(statespace[sorted_indices]):
        mapping[index] = state
    self.mapping = mapping
def getStateIndex(self,state):
    """
    Return the index of a state (its position in the sorted code list).

    Works for a single state or for an array with one state per row.
    """
    return np.searchsorted(self.codes, self.getStateCode(state)).astype(int)
def transitionStates(self,state):
    """
    Return the indices of new states and their rates.
    """
    # Translate the successor states produced by transition() into their
    # positions in the sorted state-code list.
    newstates,rates = self.transition(state)
    newindices = self.getStateIndex(newstates)
    return newindices,rates
def directInitialMatrix(self):
    """
    We generate an initial sparse matrix with all the transition rates (or probabilities).
    We later transform this matrix into a rate or probability matrix depending on the preferred method of obtaining pi.

    Returns
    -------
    scipy.sparse.csr_matrix
        Sparse matrix with the raw transition rates between all states.
    """
    #First initialize state codes and the mapping with states.
    self.setStateCodes()
    #For each state, calculate the indices of reached states and rates using the transition function.
    # NOTE(review): `imap` must be provided at module level (itertools.imap on
    # Py2 / plain map on Py3) -- confirm the file's import shim supplies it.
    results = imap(self.transitionStates, self.mapping.values())
    #Simpler alternative that uses less memory.
    #Would be competitive if the conversion from dok to csr is faster.
    # D = dok_matrix((self.size,self.size),dtype=float)
    # for index,(col,rate) in enumerate(results):
    #     D.update({(index,c): r for c,r in zip(col,rate)})
    # return D.tocsr()
    #preallocate memory for the rows, cols and rates of the sparse matrix
    rows = np.empty(self.size,dtype=int)
    cols = np.empty(self.size,dtype=int)
    rates = np.empty(self.size,dtype=float)
    #now fill the arrays with the results, increasing their size if current memory is too small.
    right = 0
    for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
        left = right
        right += len(col)
        if right >= len(cols):
            new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
            cols.resize(new_capacity)
            rates.resize(new_capacity)
            rows.resize(new_capacity)
        rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
        cols[left:right] = col
        rates[left:right] = rate
    #Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
    return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
    """
    Turn the initial matrix into a rate matrix (generator).

    Subtracts each row sum from the diagonal so every row sums to zero.
    """
    n = Q.shape[0]
    totals = Q.sum(axis=1).getA1()
    diag_idx = np.arange(n)
    diagonal = coo_matrix((totals, (diag_idx, diag_idx)), shape=Q.shape).tocsr()
    return Q - diagonal
def convertToProbabilityMatrix(self, Q):
    """
    Turn the initial matrix into a probability matrix.

    Computes P = I + Q/l with l slightly above the largest row sum (factor
    1.00001); the small extra diagonal mass prevents periodicity, and the
    uniformization helps numerical stability even if Q was already stochastic.
    """
    totals = Q.sum(axis=1).getA1()
    scale = np.max(totals)*1.00001
    diag_vals = 1.-totals/scale
    diag_idx = np.arange(Q.shape[0])
    diagonal = coo_matrix((diag_vals, (diag_idx, diag_idx)), shape=Q.shape).tocsr()
    return diagonal + Q.multiply(1./scale)
def assertSingleClass(self,P):
    """
    Verify that the chain has a single (weakly) connected communicating class.

    The steady-state distribution is not well defined otherwise.
    """
    n_components = csgraph.connected_components(P, directed=True, connection='weak')[0]
    assert n_components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %n_components
def getTransitionMatrix(self,probabilities=True):
    """
    If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
    Otherwise, we will generate it using the direct or indirect method.
    Since most solution methods use a probability matrix, this is the default setting.
    By setting probabilities=False we can also return a rate matrix.
    """
    if self.P is not None:
        # A user-supplied matrix: ensure it is stored as sparse csr.
        if isspmatrix(self.P):
            if not isspmatrix_csr(self.P):
                self.P = self.P.tocsr()
        else:
            assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
            self.P = csr_matrix(self.P)
    elif self.direct == True:
        self.P = self.directInitialMatrix()
    else:
        self.P = self.indirectInitialMatrix(self.initialState)
    # Convert the cached raw matrix to the representation the solver expects.
    if probabilities:
        P = self.convertToProbabilityMatrix(self.P)
    else:
        P = self.convertToRateMatrix(self.P)
    return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
    """
    Return the transition matrix and assert it has a single communicating class.

    Parameters
    ----------
    probabilities : bool, optional(default=True)
        When True return a probability matrix, otherwise a rate matrix.
    """
    # BUG FIX: the `probabilities` argument was ignored (hard-coded True), so
    # callers asking for a rate matrix (e.g. eigenMethod) received a
    # probability matrix instead. Pass the argument through.
    P = self.getTransitionMatrix(probabilities=probabilities)
    self.assertSingleClass(P)
    return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
    """
    Carry out the power method and store the result in the class attribute ``pi``.
    Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
    maxiter : int, optional(default=1e5)
        The maximum number of power iterations to be carried out.
    Example
    -------
    >>> P = np.array([[0.5,0.5],[0.6,0.4]])
    >>> mc = markovChain(P)
    >>> mc.powerMethod()
    >>> print(mc.pi)
    [ 0.54545455 0.45454545]
    Remarks
    -------
    The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
    The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
    """
    P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
    size = P.shape[0]
    # Start from a point mass on the first state; pi1 holds the previous iterate.
    pi = np.zeros(size);  pi1 = np.zeros(size)
    pi[0] = 1;
    n = norm(pi - pi1,1); i = 0;
    # Each loop performs two multiplications; convergence is measured by the
    # 1-norm distance between the two most recent iterates.
    while n > tol and i < maxiter:
        pi1 = P.dot(pi)
        pi = P.dot(pi1)
        n = norm(pi - pi1,1); i += 1
    self.pi = pi
def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
    """
    Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
    The result is stored in the class attribute ``pi``.
    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
    maxiter : int, optional(default=1e5)
        The maximum number of iterations to be carried out.
    Example
    -------
    >>> P = np.array([[0.5,0.5],[0.6,0.4]])
    >>> mc = markovChain(P)
    >>> mc.eigenMethod()
    >>> print(mc.pi)
    [ 0.54545455 0.45454545]
    Remarks
    -------
    The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
    Here we let the initial ``pi`` be a vector of ones.
    For large state spaces, this method may not work well.
    At the moment, we call :func:`powerMethod` if the number of states is 2.
    Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
    """
    # NOTE(review): the shift-invert below (sigma=1e-6, i.e. eigenvalue ~0)
    # assumes Q is a rate matrix -- verify that getIrreducibleTransitionMatrix
    # actually honors probabilities=False.
    Q = self.getIrreducibleTransitionMatrix(probabilities=False)
    if Q.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    # Closed-form stationary distribution for a two-state chain.
    if Q.shape == (2, 2):
        self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
        return
    size = Q.shape[0]
    guess = np.ones(size,dtype=float)
    w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
    # Keep the real part and normalize to a probability vector.
    pi = v[:, 0].real
    pi /= pi.sum()
    self.pi = pi
def linearMethod(self):
    """
    Solve pi * P = pi with sum(pi) = 1 exactly using the sparse direct solver
    :func:`spsolve`, and store the result in ``pi``.

    The first balance equation is replaced by the normalization condition;
    the remaining rows come from (P - I)^T.

    Example
    -------
    >>> P = np.array([[0.5,0.5],[0.6,0.4]])
    >>> mc = markovChain(P)
    >>> mc.linearMethod()
    >>> print(mc.pi)
    [ 0.54545455 0.45454545]

    Remarks
    -------
    For large state spaces, the linear algebra solver may not work well due to
    memory overflow. Code due to http://stackoverflow.com/questions/21308848/
    """
    P = self.getIrreducibleTransitionMatrix()
    n = P.shape[0]
    # A single state trivially has probability 1.
    if n == 1:
        self.pi = np.array([1.0])
        return
    balance = (P - eye(n)).T
    A = vstack([np.ones(n), balance[1:, :]]).tocsr()
    b = np.zeros(n)
    b[0] = 1
    self.pi = spsolve(A, b)
def krylovMethod(self,tol=1e-8):
    """
    We obtain ``pi`` by using the :func:``gmres`` solver for the system of linear equations.
    It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.
    Example
    -------
    >>> P = np.array([[0.5,0.5],[0.6,0.4]])
    >>> mc = markovChain(P)
    >>> mc.krylovMethod()
    >>> print(mc.pi)
    [ 0.54545455 0.45454545]
    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
    Raises
    ------
    RuntimeError
        If gmres fails to converge.
    Remarks
    -------
    For large state spaces, this method may not always give a solution.
    Code due to http://stackoverflow.com/questions/21308848/
    """
    P = self.getIrreducibleTransitionMatrix()
    #if P consists of one element, then set self.pi = 1.0
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    size = P.shape[0]
    dP = P - eye(size)
    #Replace the first equation by the normalizing condition.
    A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
    rhs = np.zeros((size,))
    rhs[0] = 1
    # COMPAT FIX: SciPy renamed gmres's `tol` keyword to `rtol` (deprecated in
    # 1.12, removed in 1.14). Try the new name first, fall back for old SciPy.
    try:
        pi, info = gmres(A, rhs, rtol=tol)
    except TypeError:
        pi, info = gmres(A, rhs, tol=tol)
    if info != 0:
        raise RuntimeError("gmres did not converge")
    self.pi = pi
def computePi(self,method='power'):
    """
    Compute the steady-state distribution with the chosen solver and store it in ``pi``.

    Parameters
    ----------
    method : string, optional(default='power')
        Which solver to dispatch to. One of 'power', 'eigen', 'linear', 'krylov';
        the corresponding ``<method>Method`` attribute is invoked.

    Example
    -------
    >>> P = np.array([[0.5,0.5],[0.6,0.4]])
    >>> mc = markovChain(P)
    >>> mc.computePi('power')
    >>> print(mc.pi)
    [ 0.54545455 0.45454545]

    See Also
    --------
    :func:`powerMethod`, :func:`eigenMethod`, :func:`linearMethod`, :func:`krylovMethod`
    """
    valid = ['power','eigen','linear','krylov']
    assert method in valid, "Incorrect method specified. Choose from %r" % valid
    # Dispatch to the solver named '<method>Method' on this instance.
    solver = getattr(self, method + 'Method')
    return solver()
def printPi(self):
    """
    Print every state together with its steady-state probability.

    Intended for small models only. Requires that ``pi`` has been computed
    and that ``mapping`` was filled by the direct or indirect method.
    """
    assert self.pi is not None, "Calculate pi before calling printPi()"
    assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
    for idx in self.mapping:
        print(self.mapping[idx], self.pi[idx])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.getStateIndex | python | def getStateIndex(self,state):
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int) | Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L284-L290 | [
"def getStateCode(self,state):\n \"\"\" \n Calculates the state code for a specific state or set of states.\n We transform the states so that they are nonnegative and take an inner product.\n The resulting number is unique because we use numeral system with a large enough base.\n \"\"\... | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
    """Store the optional transition matrix and method preference; reset all results."""
    # Results / bookkeeping, filled in later by the solution methods.
    self.pi = None
    self.mapping = {}
    self.initialState = None
    # Configuration supplied by the caller.
    self.P = P
    self.direct = direct
@property
def size(self):
    """
    Return the number of states in the state space, if ``self.mapping`` is defined.
    """
    # Simply len(self.mapping); it is 0 until the direct/indirect method fills the mapping.
    return len(self.mapping)
def statespace(self):
    """
    To be provided by the subclass. Return the state space
    in an integer 2d numpy array with a state on each row.
    """
    # Abstract hook: the direct method calls this to enumerate all states.
    raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
    """
    To be provided by the subclass.
    Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
    For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
    Ensure that unique states are returned.
    """
    # Abstract hook: both the direct and indirect methods build P from this.
    raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
    """
    Validate the initial state and normalize its representation.

    Parameters
    ----------
    initialState : int, list, tuple, numpy.ndarray or set
        Starting state(s) for the indirect method; all elements must be ints.

    Returns
    -------
    int, tuple or set
        An int for a scalar (or length-1) state, a tuple for a vector state,
        or the original set when several starting states are given.

    Raises
    ------
    AssertionError
        If the state is missing, of the wrong type, or not integer-valued.
    """
    assert initialState is not None, "Initial state has not been specified."
    assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
    if isinstance(initialState,list):
        #Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
        assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
        # BUG FIX: int() cannot be applied to a list; take the single element instead.
        initialState = int(initialState[0]) if len(initialState)==1 else tuple(initialState)
    elif isinstance(initialState,tuple):
        #Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
        assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
        if len(initialState)==1:
            # BUG FIX: int() cannot be applied to a tuple; take the single element.
            initialState = int(initialState[0])
    elif isinstance(initialState,np.ndarray):
        #Check whether the state is a 1d numpy array. Return an int if it has length 1.
        assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
        # Index the single element explicitly: int() on a size-1 array is deprecated in NumPy.
        initialState = int(initialState[0]) if len(initialState)==1 else tuple(initialState)
    elif isinstance(initialState,set):
        #If we have a set, then check whether all elements are ints or tuples.
        for state in initialState:
            assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
            if isinstance(state,tuple):
                assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
    return initialState
def checkTransitionType(self,state):
    """
    Inspect what :func:`transition` returns for ``state``.

    Returns True when it returns numpy output (a 2d integer array of states
    plus a 1d array of rates), False when it returns a dict mapping int/tuple
    states to float rates. Raises AssertionError for anything else.
    """
    result = self.transition(state)
    assert isinstance(result,(dict,tuple)), "Transition function does not return a dict or tuple"
    if isinstance(result, tuple):
        # numpy form: (states, rates)
        assert len(result)==2, "The transition function should return two variables: states and rates."
        states,rates = result
        assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
        assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
        return True
    # dict form: {state: rate}
    assert all(isinstance(s, (int,tuple)) for s in result.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
    assert all(isinstance(r, float) for r in result.values()), "Transition function returns a dict, but the rates should be floats."
    return False
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
    def indirectInitialMatrix(self, initialState):
        """
        Build the initial sparse matrix by exploring the reachable state space.

        Given some initial state, this iteratively determines new states.
        We repeatedly call the transition function on unvisited states in the frontier set.
        Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.

        Parameters
        ----------
        initialState : int, tuple, list, np.array or set
            The state (or set of states) from which the exploration starts.

        Returns
        -------
        scipy.sparse.csr_matrix
            The raw transition rates/probabilities between all states
            reachable from `initialState`.
        """
        mapping = {}  #state -> row/column index in the matrix
        rates = OrderedDict()  #(from_index, to_index) -> transition rate
        #Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
        convertedState = self.checkInitialState(initialState)
        if isinstance(convertedState,set):
            #If initialstates is a set, include all states in the set in the mapping.
            frontier = set( convertedState )
            for idx,state in enumerate(convertedState):
                mapping[state] = idx
                if idx == 0: #Test the return type of the transition function (dict or numpy).
                    usesNumpy = self.checkTransitionType(initialState)
        else:
            #Otherwise include only the single state.
            frontier = set( [convertedState] )
            usesNumpy = self.checkTransitionType(initialState)
            mapping[convertedState] = 0
        #Breadth-like exploration: pop an unexplored state, record its transitions, queue unseen targets.
        while len(frontier) > 0:
            fromstate = frontier.pop()
            fromindex = mapping[fromstate]
            if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
                transitions = self.transition(np.array(fromstate))
                transitions = self.convertToTransitionDict(transitions)
            else:
                transitions = self.transition(fromstate)
            for tostate,rate in transitions.items():
                if tostate not in mapping:
                    frontier.add(tostate)
                    mapping[tostate] = len(mapping)  #next free index
                toindex = mapping[tostate]
                rates[(fromindex, toindex)] = rate
        #Inverse the keys and values in mapping to get a dictionary with indices and states.
        self.mapping = {value: key for key, value in list(mapping.items())}
        #Use the `rates` dictionary to fill a sparse dok matrix.
        D = dok_matrix((self.size,self.size))
        D.update(rates)
        return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
    def directInitialMatrix(self):
        """
        Generate an initial sparse matrix with all the transition rates (or
        probabilities) by applying the transition function to every state of
        the full state space.

        Returns
        -------
        scipy.sparse.csr_matrix
            The raw matrix; it is later transformed into a rate or
            probability matrix depending on the preferred method of
            obtaining pi.
        """
        #First initialize state codes and the mapping with states.
        self.setStateCodes()
        #For each state, calculate the indices of reached states and rates using the transition function.
        #`imap` is lazy: the transitions are generated while the arrays below are filled.
        results = imap(self.transitionStates, self.mapping.values())
        #Simpler alternative that uses less memory.
        #Would be competitive if the conversion from dok to csr is faster.
        # D = dok_matrix((self.size,self.size),dtype=float)
        # for index,(col,rate) in enumerate(results):
        #     D.update({(index,c): r for c,r in zip(col,rate)})
        # return D.tocsr()
        #preallocate memory for the rows, cols and rates of the sparse matrix
        rows = np.empty(self.size,dtype=int)
        cols = np.empty(self.size,dtype=int)
        rates = np.empty(self.size,dtype=float)
        #now fill the arrays with the results, increasing their size if current memory is too small.
        right = 0
        for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
            left = right
            right += len(col)
            if right >= len(cols):
                new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
                cols.resize(new_capacity)
                rates.resize(new_capacity)
                rows.resize(new_capacity)
            rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
            cols[left:right] = col
            rates[left:right] = rate
        #Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
        return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag
def convertToProbabilityMatrix(self, Q):
"""
Converts the initial matrix to a probability matrix
We calculate P = I + Q/l, with l the largest diagonal element.
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
"""
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l)
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
def getTransitionMatrix(self,probabilities=True):
"""
If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix.
"""
if self.P is not None:
if isspmatrix(self.P):
if not isspmatrix_csr(self.P):
self.P = self.P.tocsr()
else:
assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
self.P = csr_matrix(self.P)
elif self.direct == True:
self.P = self.directInitialMatrix()
else:
self.P = self.indirectInitialMatrix(self.initialState)
if probabilities:
P = self.convertToProbabilityMatrix(self.P)
else:
P = self.convertToRateMatrix(self.P)
return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
#Gets the transitionmatrix and assert that it consists of a single irreducible class.
P = self.getTransitionMatrix(probabilities=True)
self.assertSingleClass(P)
return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
"""
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi
    def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
        """
        Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
        The result is stored in the class attribute ``pi``.
        Parameters
        ----------
        tol : float, optional(default=1e-8)
            Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
        maxiter : int, optional(default=1e5)
            The maximum number of iterations to be carried out.
        Example
        -------
        >>> P = np.array([[0.5,0.5],[0.6,0.4]])
        >>> mc = markovChain(P)
        >>> mc.eigenMethod()
        >>> print(mc.pi)
        [ 0.54545455  0.45454545]
        Remarks
        -------
        The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
        Here we let the initial ``pi`` be a vector of ones.
        For large state spaces, this method may not work well.
        The 1-state and 2-state cases are handled analytically below rather than by the eigensolver.
        Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
        """
        #NOTE(review): probabilities=False is requested here, but getIrreducibleTransitionMatrix
        #hardcodes probabilities=True when calling getTransitionMatrix — confirm a rate matrix
        #is actually returned, since sigma=1e-6 below targets the eigenvalue near 0 of a rate matrix.
        Q = self.getIrreducibleTransitionMatrix(probabilities=False)
        #A single state trivially has probability 1.
        if Q.shape == (1, 1):
            self.pi = np.array([1.0])
            return
        #For two states the stationary distribution follows directly from the off-diagonal entries.
        if Q.shape == (2, 2):
            self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
            return
        size = Q.shape[0]
        guess = np.ones(size,dtype=float)
        #Shift-invert around sigma=1e-6 to find the eigenvalue closest to zero.
        w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
        pi = v[:, 0].real
        #Normalize the eigenvector so that it is a probability distribution.
        pi /= pi.sum()
        self.pi = pi
def linearMethod(self):
"""
Determines ``pi`` by solving a system of linear equations using :func:`spsolve`.
The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.linearMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
For large state spaces, the linear algebra solver may not work well due to memory overflow.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
self.pi = spsolve(A, rhs)
    def krylovMethod(self,tol=1e-8):
        """
        We obtain ``pi`` by using the :func:``gmres`` solver for the system of linear equations.
        It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.

        Parameters
        ----------
        tol : float, optional(default=1e-8)
            Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.

        Raises
        ------
        RuntimeError
            If the gmres solver does not converge.

        Remarks
        -------
        For large state spaces, this method may not always give a solution.
        Code due to http://stackoverflow.com/questions/21308848/
        """
        P = self.getIrreducibleTransitionMatrix()
        #if P consists of one element, then set self.pi = 1.0
        if P.shape == (1, 1):
            self.pi = np.array([1.0])
            return
        size = P.shape[0]
        dP = P - eye(size)
        #Replace the first equation by the normalizing condition.
        A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
        rhs = np.zeros((size,))
        rhs[0] = 1
        #NOTE(review): SciPy renamed gmres's `tol` keyword to `rtol` (removed in SciPy >= 1.14);
        #confirm against the pinned SciPy version.
        pi, info = gmres(A, rhs, tol=tol)
        #info > 0 means no convergence within the iteration limit; info < 0 signals illegal input.
        if info != 0:
            raise RuntimeError("gmres did not converge")
        self.pi = pi
def computePi(self,method='power'):
"""
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
"""
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)()
def printPi(self):
"""
Prints all states state and their steady state probabilities.
Not recommended for large state spaces.
"""
assert self.pi is not None, "Calculate pi before calling printPi()"
assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
for key,state in self.mapping.items():
print(state,self.pi[key])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.transitionStates | python | def transitionStates(self,state):
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates | Return the indices of new states and their rates. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L292-L298 | [
"def transition(self, state):\n \"\"\"\n To be provided by the subclass. \n Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.\n For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the tran... | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
    def __init__(self,P=None,direct=False):
        """
        Initialize the Markov chain.

        Parameters
        ----------
        P : array(float,ndim=2) or sparse matrix, optional(default=None)
            Transition matrix; derived automatically when omitted.
        direct : bool, optional(default=False)
            Use the direct method (full statespace) instead of the indirect
            one when P has to be generated.
        """
        self.P = P
        self.direct = direct
        self.pi = None #steady state probability vector
        self.mapping = {} #mapping used to identify states
        self.initialState = None #a dummy initial state
    @property
    def size(self) -> int:
        """
        Return the number of states in the state space.

        Only meaningful after ``self.mapping`` has been filled by the direct
        or indirect method; before that it is 0.
        """
        return len(self.mapping)
    def statespace(self):
        """
        To be provided by the subclass. Return the state space
        in an integer 2d numpy array with a state on each row.

        Raises
        ------
        NotImplementedError
            Always, unless overridden in a subclass.
        """
        raise NotImplementedError('Implement the function statespace() in the subclass')
    def transition(self, state):
        """
        To be provided by the subclass.
        Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
        For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
        Ensure that unique states are returned.

        Raises
        ------
        NotImplementedError
            Always, unless overridden in a subclass.
        """
        raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
def checkTransitionType(self,state):
"""
Check whether the transition function returns output of the correct types.
This can be either a dictionary with as keys ints/tuples and values floats.
Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates.
"""
test = self.transition(state)
assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
if isinstance(test,dict):
assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
usesNumpy=False
if isinstance(test,tuple):
assert len(test)==2, "The transition function should return two variables: states and rates."
states,rates = test
assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
usesNumpy = True
return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
    def indirectInitialMatrix(self, initialState):
        """
        Build the initial sparse matrix by exploring the reachable state space.

        Given some initial state, this iteratively determines new states.
        We repeatedly call the transition function on unvisited states in the frontier set.
        Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.

        Parameters
        ----------
        initialState : int, tuple, list, np.array or set
            The state (or set of states) from which the exploration starts.

        Returns
        -------
        scipy.sparse.csr_matrix
            The raw transition rates/probabilities between all states
            reachable from `initialState`.
        """
        mapping = {}  #state -> row/column index in the matrix
        rates = OrderedDict()  #(from_index, to_index) -> transition rate
        #Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
        convertedState = self.checkInitialState(initialState)
        if isinstance(convertedState,set):
            #If initialstates is a set, include all states in the set in the mapping.
            frontier = set( convertedState )
            for idx,state in enumerate(convertedState):
                mapping[state] = idx
                if idx == 0: #Test the return type of the transition function (dict or numpy).
                    usesNumpy = self.checkTransitionType(initialState)
        else:
            #Otherwise include only the single state.
            frontier = set( [convertedState] )
            usesNumpy = self.checkTransitionType(initialState)
            mapping[convertedState] = 0
        #Breadth-like exploration: pop an unexplored state, record its transitions, queue unseen targets.
        while len(frontier) > 0:
            fromstate = frontier.pop()
            fromindex = mapping[fromstate]
            if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
                transitions = self.transition(np.array(fromstate))
                transitions = self.convertToTransitionDict(transitions)
            else:
                transitions = self.transition(fromstate)
            for tostate,rate in transitions.items():
                if tostate not in mapping:
                    frontier.add(tostate)
                    mapping[tostate] = len(mapping)  #next free index
                toindex = mapping[tostate]
                rates[(fromindex, toindex)] = rate
        #Inverse the keys and values in mapping to get a dictionary with indices and states.
        self.mapping = {value: key for key, value in list(mapping.items())}
        #Use the `rates` dictionary to fill a sparse dok matrix.
        D = dok_matrix((self.size,self.size))
        D.update(rates)
        return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
    def directInitialMatrix(self):
        """
        Generate an initial sparse matrix with all the transition rates (or
        probabilities) by applying the transition function to every state of
        the full state space.

        Returns
        -------
        scipy.sparse.csr_matrix
            The raw matrix; it is later transformed into a rate or
            probability matrix depending on the preferred method of
            obtaining pi.
        """
        #First initialize state codes and the mapping with states.
        self.setStateCodes()
        #For each state, calculate the indices of reached states and rates using the transition function.
        #`imap` is lazy: the transitions are generated while the arrays below are filled.
        results = imap(self.transitionStates, self.mapping.values())
        #Simpler alternative that uses less memory.
        #Would be competitive if the conversion from dok to csr is faster.
        # D = dok_matrix((self.size,self.size),dtype=float)
        # for index,(col,rate) in enumerate(results):
        #     D.update({(index,c): r for c,r in zip(col,rate)})
        # return D.tocsr()
        #preallocate memory for the rows, cols and rates of the sparse matrix
        rows = np.empty(self.size,dtype=int)
        cols = np.empty(self.size,dtype=int)
        rates = np.empty(self.size,dtype=float)
        #now fill the arrays with the results, increasing their size if current memory is too small.
        right = 0
        for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
            left = right
            right += len(col)
            if right >= len(cols):
                new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
                cols.resize(new_capacity)
                rates.resize(new_capacity)
                rows.resize(new_capacity)
            rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
            cols[left:right] = col
            rates[left:right] = rate
        #Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
        return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag
def convertToProbabilityMatrix(self, Q):
"""
Converts the initial matrix to a probability matrix
We calculate P = I + Q/l, with l the largest diagonal element.
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
"""
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l)
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
def getTransitionMatrix(self,probabilities=True):
"""
If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix.
"""
if self.P is not None:
if isspmatrix(self.P):
if not isspmatrix_csr(self.P):
self.P = self.P.tocsr()
else:
assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
self.P = csr_matrix(self.P)
elif self.direct == True:
self.P = self.directInitialMatrix()
else:
self.P = self.indirectInitialMatrix(self.initialState)
if probabilities:
P = self.convertToProbabilityMatrix(self.P)
else:
P = self.convertToRateMatrix(self.P)
return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
#Gets the transitionmatrix and assert that it consists of a single irreducible class.
P = self.getTransitionMatrix(probabilities=True)
self.assertSingleClass(P)
return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
"""
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi
def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
The result is stored in the class attribute ``pi``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.eigenMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
Here we let the initial ``pi`` be a vector of ones.
For large state spaces, this method may not work well.
At the moment, we call :func:`powerMethod` if the number of states is 2.
Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
"""
Q = self.getIrreducibleTransitionMatrix(probabilities=False)
if Q.shape == (1, 1):
self.pi = np.array([1.0])
return
if Q.shape == (2, 2):
self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
return
size = Q.shape[0]
guess = np.ones(size,dtype=float)
w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
pi = v[:, 0].real
pi /= pi.sum()
self.pi = pi
def linearMethod(self):
"""
Determines ``pi`` by solving a system of linear equations using :func:`spsolve`.
The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.linearMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
For large state spaces, the linear algebra solver may not work well due to memory overflow.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
self.pi = spsolve(A, rhs)
def krylovMethod(self,tol=1e-8):
"""
We obtain ``pi`` by using the :func:``gmres`` solver for the system of linear equations.
It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.krylovMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
Remarks
-------
For large state spaces, this method may not always give a solution.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
pi, info = gmres(A, rhs, tol=tol)
if info != 0:
raise RuntimeError("gmres did not converge")
self.pi = pi
def computePi(self,method='power'):
"""
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
"""
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)()
def printPi(self):
"""
Prints all states state and their steady state probabilities.
Not recommended for large state spaces.
"""
assert self.pi is not None, "Calculate pi before calling printPi()"
assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
for key,state in self.mapping.items():
print(state,self.pi[key])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.directInitialMatrix | python | def directInitialMatrix(self):
#First initialize state codes and the mapping with states.
self.setStateCodes()
#For each state, calculate the indices of reached states and rates using the transition function.
results = imap(self.transitionStates, self.mapping.values())
#Simpler alternative that uses less memory.
#Would be competitive if the conversion from dok to csr is faster.
# D = dok_matrix((self.size,self.size),dtype=float)
# for index,(col,rate) in enumerate(results):
# D.update({(index,c): r for c,r in zip(col,rate)})
# return D.tocsr()
#preallocate memory for the rows, cols and rates of the sparse matrix
rows = np.empty(self.size,dtype=int)
cols = np.empty(self.size,dtype=int)
rates = np.empty(self.size,dtype=float)
#now fill the arrays with the results, increasing their size if current memory is too small.
right = 0
for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
left = right
right += len(col)
if right >= len(cols):
new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
cols.resize(new_capacity)
rates.resize(new_capacity)
rows.resize(new_capacity)
rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
cols[left:right] = col
rates[left:right] = rate
#Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr() | We generate an initial sparse matrix with all the transition rates (or probabilities).
We later transform this matrix into a rate or probability matrix depending on the preferred method of obtaining pi. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L300-L339 | [
"def setStateCodes(self): \n \"\"\" \n Generates (sorted) codes for the states in the statespace\n This is used to quickly identify which states occur after a transition/action\n \"\"\"\n\n #calculate the statespace and determine the minima and maxima each element in the state vecto... | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
    def __init__(self,P=None,direct=False):
        """
        Set up the chain; the transition matrix is either given or derived later.

        Parameters
        ----------
        P : 2d numpy array or scipy sparse matrix, optional(default=None)
            Transition matrix. When omitted, it is generated on demand with
            the direct or indirect method.
        direct : bool, optional(default=False)
            Use the direct method (requires statespace()) instead of the
            indirect method when P has to be generated.
        """
        self.P = P
        self.direct = direct
        self.pi = None #steady state probability vector, filled by the solver methods
        self.mapping = {} #maps matrix indices to states (direct/indirect method only)
        self.initialState = None #a dummy initial state; subclasses set this for the indirect method
    @property
    def size(self):
        """
        Return the number of states in the state space, if ``self.mapping`` is defined.

        Returns 0 until the mapping has been filled by the direct or indirect method.
        """
        return len(self.mapping)
    def statespace(self):
        """
        To be provided by the subclass. Return the state space
        in an integer 2d numpy array with a state on each row.

        Raises
        ------
        NotImplementedError
            Always; subclasses must override this for the direct method.
        """
        raise NotImplementedError('Implement the function statespace() in the subclass')
    def transition(self, state):
        """
        To be provided by the subclass.
        Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
        For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
        Ensure that unique states are returned.

        Raises
        ------
        NotImplementedError
            Always; subclasses must override this for the direct and indirect methods.
        """
        raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
    def checkTransitionType(self,state):
        """
        Check whether the transition function returns output of the correct types.
        This can be either a dictionary with as keys ints/tuples and values floats.
        Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates.

        Returns
        -------
        bool
            True when the transition function uses the numpy format,
            False when it returns a dictionary.
        """
        test = self.transition(state)
        #Exactly one of the two branches below runs, so usesNumpy is always bound.
        assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
        if isinstance(test,dict):
            assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
            assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
            usesNumpy=False
        if isinstance(test,tuple):
            assert len(test)==2, "The transition function should return two variables: states and rates."
            states,rates = test
            assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
            assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
            usesNumpy = True
        return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
    def indirectInitialMatrix(self, initialState):
        """
        Given some initial state, this iteratively determines new states.
        We repeatedly call the transition function on unvisited states in the frontier set.
        Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.

        Parameters
        ----------
        initialState : int, list, tuple, set or 1d integer numpy array
            State (or set of states) from which the exploration starts.

        Returns
        -------
        scipy.sparse.csr_matrix
            Matrix with the raw transition rates between all reachable states.
        """
        mapping = {}
        rates = OrderedDict()
        #Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
        convertedState = self.checkInitialState(initialState)
        if isinstance(convertedState,set):
            #If initialstates is a set, include all states in the set in the mapping.
            frontier = set( convertedState )
            for idx,state in enumerate(convertedState):
                mapping[state] = idx
                if idx == 0: #Test the return type of the transition function (dict or numpy).
                    usesNumpy = self.checkTransitionType(initialState)
        else:
            #Otherwise include only the single state.
            frontier = set( [convertedState] )
            usesNumpy = self.checkTransitionType(initialState)
            mapping[convertedState] = 0
        #Breadth-less exploration: pop any unvisited state and record its outgoing rates.
        while len(frontier) > 0:
            fromstate = frontier.pop()
            fromindex = mapping[fromstate]
            if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
                transitions = self.transition(np.array(fromstate))
                transitions = self.convertToTransitionDict(transitions)
            else:
                transitions = self.transition(fromstate)
            for tostate,rate in transitions.items():
                if tostate not in mapping:
                    frontier.add(tostate)
                    mapping[tostate] = len(mapping)
                toindex = mapping[tostate]
                #NOTE(review): duplicate (from,to) pairs overwrite rather than sum here;
                #this relies on transition() returning unique states, as its docstring demands.
                rates[(fromindex, toindex)] = rate
        #Inverse the keys and values in mapping to get a dictionary with indices and states.
        self.mapping = {value: key for key, value in list(mapping.items())}
        #Use the `rates` dictionary to fill a sparse dok matrix.
        D = dok_matrix((self.size,self.size))
        D.update(rates)
        return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
def directInitialMatrix(self):
"""
We generate an initial sparse matrix with all the transition rates (or probabilities).
We later transform this matrix into a rate or probability matrix depending on the preferred method of obtaining pi.
"""
#First initialize state codes and the mapping with states.
self.setStateCodes()
#For each state, calculate the indices of reached states and rates using the transition function.
results = imap(self.transitionStates, self.mapping.values())
#Simpler alternative that uses less memory.
#Would be competitive if the conversion from dok to csr is faster.
# D = dok_matrix((self.size,self.size),dtype=float)
# for index,(col,rate) in enumerate(results):
# D.update({(index,c): r for c,r in zip(col,rate)})
# return D.tocsr()
#preallocate memory for the rows, cols and rates of the sparse matrix
rows = np.empty(self.size,dtype=int)
cols = np.empty(self.size,dtype=int)
rates = np.empty(self.size,dtype=float)
#now fill the arrays with the results, increasing their size if current memory is too small.
right = 0
for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
left = right
right += len(col)
if right >= len(cols):
new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
cols.resize(new_capacity)
rates.resize(new_capacity)
rows.resize(new_capacity)
rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
cols[left:right] = col
rates[left:right] = rate
#Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag
def convertToProbabilityMatrix(self, Q):
"""
Converts the initial matrix to a probability matrix
We calculate P = I + Q/l, with l the largest diagonal element.
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
"""
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l)
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
    def getTransitionMatrix(self,probabilities=True):
        """
        If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
        Otherwise, we will generate it using the direct or indirect method.
        Since most solution methods use a probability matrix, this is the default setting.
        By setting probabilities=False we can also return a rate matrix.

        Parameters
        ----------
        probabilities : bool, optional(default=True)
            Return a probability matrix when True, a rate matrix otherwise.

        Returns
        -------
        scipy.sparse.csr_matrix
            The converted matrix. Note that the raw matrix is cached in
            ``self.P`` as a side effect, so later calls reuse it.
        """
        if self.P is not None:
            #A user-supplied matrix: only normalize its storage format to csr.
            if isspmatrix(self.P):
                if not isspmatrix_csr(self.P):
                    self.P = self.P.tocsr()
            else:
                assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
                self.P = csr_matrix(self.P)
        elif self.direct == True:
            self.P = self.directInitialMatrix()
        else:
            self.P = self.indirectInitialMatrix(self.initialState)
        if probabilities:
            P = self.convertToProbabilityMatrix(self.P)
        else:
            P = self.convertToRateMatrix(self.P)
        return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
#Gets the transitionmatrix and assert that it consists of a single irreducible class.
P = self.getTransitionMatrix(probabilities=True)
self.assertSingleClass(P)
return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
"""
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi
    def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
        """
        Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
        The result is stored in the class attribute ``pi``.

        Parameters
        ----------
        tol : float, optional(default=1e-8)
            Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
        maxiter : int, optional(default=1e5)
            The maximum number of iterations to be carried out.

        Remarks
        -------
        The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
        Here we let the initial ``pi`` be a vector of ones.
        For large state spaces, this method may not work well.
        At the moment, we call :func:`powerMethod` if the number of states is 2.
        Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
        """
        #NOTE(review): as written in this file, getIrreducibleTransitionMatrix ignores its
        #probabilities argument and always returns a probability matrix --
        #verify that a rate matrix is actually obtained here.
        Q = self.getIrreducibleTransitionMatrix(probabilities=False)
        if Q.shape == (1, 1):
            self.pi = np.array([1.0])
            return
        if Q.shape == (2, 2):
            #For a 2x2 rate matrix the stationary distribution has a closed form.
            self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
            return
        size = Q.shape[0]
        guess = np.ones(size,dtype=float)
        #Shift-invert around sigma=1e-6 targets the eigenvalue closest to zero.
        w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
        pi = v[:, 0].real
        pi /= pi.sum()
        self.pi = pi
def linearMethod(self):
"""
Determines ``pi`` by solving a system of linear equations using :func:`spsolve`.
The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.linearMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
For large state spaces, the linear algebra solver may not work well due to memory overflow.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
self.pi = spsolve(A, rhs)
def krylovMethod(self,tol=1e-8):
"""
We obtain ``pi`` by using the :func:``gmres`` solver for the system of linear equations.
It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.krylovMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
Remarks
-------
For large state spaces, this method may not always give a solution.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
pi, info = gmres(A, rhs, tol=tol)
if info != 0:
raise RuntimeError("gmres did not converge")
self.pi = pi
def computePi(self,method='power'):
"""
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
"""
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)()
def printPi(self):
"""
Prints all states state and their steady state probabilities.
Not recommended for large state spaces.
"""
assert self.pi is not None, "Calculate pi before calling printPi()"
assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
for key,state in self.mapping.items():
print(state,self.pi[key])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.convertToRateMatrix | python | def convertToRateMatrix(self, Q):
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag | Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L341-L349 | null | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
self.P = P
self.direct = direct
self.pi = None #steady state probability vector
self.mapping = {} #mapping used to identify states
self.initialState = None #a dummy initial state
@property
def size(self):
"""
Return the number of states in the state space, if ``self.mapping`` is defined.
"""
return len(self.mapping)
def statespace(self):
"""
To be provided by the subclass. Return the state space
in an integer 2d numpy array with a state on each row.
"""
raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
"""
To be provided by the subclass.
Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
Ensure that unique states are returned.
"""
raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
    def checkTransitionType(self,state):
        """
        Probe the transition function once and report its output convention.

        Calls ``self.transition(state)`` and validates the result. Returns
        True when the transition function uses the numpy convention (a 2d
        integer array of states plus a 1d array of rates) and False when it
        returns a dict keyed by int/tuple states with float rates.

        Raises AssertionError when the output matches neither convention.
        """
        test = self.transition(state)
        assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
        if isinstance(test,dict):
            # Dict convention: keys are states (ints or tuples), values are float rates.
            assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
            assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
            usesNumpy=False
        if isinstance(test,tuple):
            # Numpy convention: a (states, rates) pair of numpy arrays.
            assert len(test)==2, "The transition function should return two variables: states and rates."
            states,rates = test
            assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
            assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
            usesNumpy = True
        return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
    def indirectInitialMatrix(self, initialState):
        """
        Build the sparse transition matrix by exploring the state space from `initialState`.

        Breadth-style search: the transition function is repeatedly called on
        unvisited states in the frontier set. Each newly visited state gets an
        index in `mapping`; the rate of each (from, to) transition is stored in
        `rates`. On completion ``self.mapping`` holds index -> state and the
        collected rates are returned as a csr matrix.
        """
        mapping = {}
        rates = OrderedDict()
        #Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
        convertedState = self.checkInitialState(initialState)
        if isinstance(convertedState,set):
            #If initialState is a set, include all its states in the mapping up front.
            frontier = set( convertedState )
            for idx,state in enumerate(convertedState):
                mapping[state] = idx
                if idx == 0: #Test the return type of the transition function (dict or numpy) once.
                    usesNumpy = self.checkTransitionType(initialState)
        else:
            #Otherwise include only the single state.
            frontier = set( [convertedState] )
            usesNumpy = self.checkTransitionType(initialState)
            mapping[convertedState] = 0
        while len(frontier) > 0:
            fromstate = frontier.pop()
            fromindex = mapping[fromstate]
            if usesNumpy: #If numpy output is used, convert it to a dict keyed by state.
                transitions = self.transition(np.array(fromstate))
                transitions = self.convertToTransitionDict(transitions)
            else:
                transitions = self.transition(fromstate)
            for tostate,rate in transitions.items():
                if tostate not in mapping:
                    #First visit: schedule for exploration and assign the next free index.
                    frontier.add(tostate)
                    mapping[tostate] = len(mapping)
                toindex = mapping[tostate]
                rates[(fromindex, toindex)] = rate
        #Invert the keys and values in mapping to get a dictionary with indices and states.
        self.mapping = {value: key for key, value in list(mapping.items())}
        #Use the `rates` dictionary to fill a sparse dok matrix, then convert to csr.
        D = dok_matrix((self.size,self.size))
        D.update(rates)
        return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
    def directInitialMatrix(self):
        """
        Build the initial sparse matrix of transition rates/probabilities over the full state space.

        First assigns codes and indices to all states via setStateCodes(), then
        lazily evaluates transitionStates() for every state and assembles the
        results into a csr matrix. Row/column/rate buffers are preallocated and
        grown geometrically when they run out of space.
        """
        #First initialize state codes and the mapping with states.
        self.setStateCodes()
        #For each state, calculate the indices of reached states and rates using the transition function.
        results = imap(self.transitionStates, self.mapping.values())
        #Simpler alternative that uses less memory.
        #Would be competitive if the conversion from dok to csr is faster.
        # D = dok_matrix((self.size,self.size),dtype=float)
        # for index,(col,rate) in enumerate(results):
        #     D.update({(index,c): r for c,r in zip(col,rate)})
        # return D.tocsr()
        #preallocate memory for the rows, cols and rates of the sparse matrix
        rows = np.empty(self.size,dtype=int)
        cols = np.empty(self.size,dtype=int)
        rates = np.empty(self.size,dtype=float)
        #now fill the arrays with the results, increasing their size if current memory is too small.
        right = 0
        for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
            left = right
            right += len(col)
            if right >= len(cols):
                new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
                cols.resize(new_capacity)
                rates.resize(new_capacity)
                rows.resize(new_capacity)
            rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
            cols[left:right] = col
            rates[left:right] = rate
        #Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
        return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToProbabilityMatrix(self, Q):
"""
Converts the initial matrix to a probability matrix
We calculate P = I + Q/l, with l the largest diagonal element.
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
"""
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l)
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
    def getTransitionMatrix(self,probabilities=True):
        """
        Return the transition matrix as a sparse csr matrix, building it if needed.

        If ``self.P`` was supplied by the user it is reused (converted to csr
        format if necessary); otherwise it is generated with the direct method
        when ``self.direct`` is True, and with the indirect method otherwise.
        With ``probabilities=True`` (the default) a probability matrix is
        returned, otherwise a rate matrix.
        """
        if self.P is not None:
            # A user-supplied matrix: normalize its storage format to csr.
            if isspmatrix(self.P):
                if not isspmatrix_csr(self.P):
                    self.P = self.P.tocsr()
            else:
                assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
                self.P = csr_matrix(self.P)
        elif self.direct == True:
            self.P = self.directInitialMatrix()
        else:
            self.P = self.indirectInitialMatrix(self.initialState)
        # Convert the cached raw matrix into the requested form on every call.
        if probabilities:
            P = self.convertToProbabilityMatrix(self.P)
        else:
            P = self.convertToRateMatrix(self.P)
        return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
#Gets the transitionmatrix and assert that it consists of a single irreducible class.
P = self.getTransitionMatrix(probabilities=True)
self.assertSingleClass(P)
return P
    def powerMethod(self, tol = 1e-8, maxiter = 1e5):
        """
        Carry out the power method and store the result in the class attribute ``pi``.

        Repeatedly multiplies ``pi`` by the (transposed) transition matrix until
        the 1-norm of the difference between successive iterates drops below
        ``tol`` or ``maxiter`` iterations have been carried out.

        Parameters
        ----------
        tol : float, optional(default=1e-8)
            Tolerance level for the precision of the end result. A lower tolerance leads to a more accurate estimate of ``pi``.
        maxiter : int, optional(default=1e5)
            The maximum number of power iterations to be carried out.

        Example
        -------
        >>> P = np.array([[0.5,0.5],[0.6,0.4]])
        >>> mc = markovChain(P)
        >>> mc.powerMethod()
        >>> print(mc.pi)
        [ 0.54545455 0.45454545]

        Remarks
        -------
        The power method is robust even when the state space becomes large (more than 500.000 states),
        whereas the other methods may have issues with memory or convergence.
        It may converge slowly for Markov chains where states are rather disconnected, that is,
        when the expected time to go from one state to another is large.
        """
        P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
        size = P.shape[0]
        pi = np.zeros(size); pi1 = np.zeros(size)
        pi[0] = 1;
        n = norm(pi - pi1,1); i = 0;
        # Each loop applies P twice; convergence is therefore checked every second
        # multiplication, and `i` counts pairs of matrix-vector products.
        while n > tol and i < maxiter:
            pi1 = P.dot(pi)
            pi = P.dot(pi1)
            n = norm(pi - pi1,1); i += 1
        self.pi = pi
    def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
        """
        Determine ``pi`` via an eigenvector of the rate matrix, using :func:`eigs`.
        The result is stored in the class attribute ``pi``.

        Parameters
        ----------
        tol : float, optional(default=1e-8)
            Tolerance level for the precision of the end result. A lower tolerance leads to a more accurate estimate of ``pi``.
        maxiter : int, optional(default=1e5)
            The maximum number of iterations to be carried out.

        Example
        -------
        >>> P = np.array([[0.5,0.5],[0.6,0.4]])
        >>> mc = markovChain(P)
        >>> mc.eigenMethod()
        >>> print(mc.pi)
        [ 0.54545455 0.45454545]

        Remarks
        -------
        The speed of convergence depends heavily on the choice of the initial guess
        for ``pi``; here a vector of ones is used. For large state spaces this
        method may not work well. Chains with one or two states are solved in
        closed form rather than with :func:`eigs`.
        Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
        """
        Q = self.getIrreducibleTransitionMatrix(probabilities=False)
        # Trivial single-state chain.
        if Q.shape == (1, 1):
            self.pi = np.array([1.0])
            return
        if Q.shape == (2, 2):
            # Closed-form two-state solution: pi is proportional to the opposite transition rates.
            self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
            return
        size = Q.shape[0]
        guess = np.ones(size,dtype=float)
        # Shift-invert around sigma=1e-6 with which='LM' targets the eigenvalue closest to zero.
        w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
        pi = v[:, 0].real
        pi /= pi.sum()  # normalize the eigenvector into a probability vector
        self.pi = pi
    def linearMethod(self):
        """
        Determine ``pi`` by solving the system of steady-state equations with :func:`spsolve`.
        The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.

        Example
        -------
        >>> P = np.array([[0.5,0.5],[0.6,0.4]])
        >>> mc = markovChain(P)
        >>> mc.linearMethod()
        >>> print(mc.pi)
        [ 0.54545455 0.45454545]

        Remarks
        -------
        For large state spaces, the linear algebra solver may not work well due to memory overflow.
        Code due to http://stackoverflow.com/questions/21308848/
        """
        P = self.getIrreducibleTransitionMatrix()
        #if P consists of one element, then set self.pi = 1.0
        if P.shape == (1, 1):
            self.pi = np.array([1.0])
            return
        size = P.shape[0]
        dP = P - eye(size)
        #Replace the first equation of (P - I)^T pi = 0 by the normalizing condition sum(pi) = 1.
        A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
        rhs = np.zeros((size,))
        rhs[0] = 1
        self.pi = spsolve(A, rhs)
    def krylovMethod(self,tol=1e-8):
        """
        Obtain ``pi`` by solving the steady-state equations with the :func:`gmres` solver,
        which searches the Krylov subspace for a vector with minimal residual.
        The result is stored in the class attribute ``pi``.

        Parameters
        ----------
        tol : float, optional(default=1e-8)
            Tolerance level for the precision of the end result. A lower tolerance leads to a more accurate estimate of ``pi``.

        Example
        -------
        >>> P = np.array([[0.5,0.5],[0.6,0.4]])
        >>> mc = markovChain(P)
        >>> mc.krylovMethod()
        >>> print(mc.pi)
        [ 0.54545455 0.45454545]

        Remarks
        -------
        For large state spaces, this method may not always give a solution.
        Raises RuntimeError when gmres reports non-convergence.
        Code due to http://stackoverflow.com/questions/21308848/
        """
        P = self.getIrreducibleTransitionMatrix()
        #if P consists of one element, then set self.pi = 1.0
        if P.shape == (1, 1):
            self.pi = np.array([1.0])
            return
        size = P.shape[0]
        dP = P - eye(size)
        #Replace the first equation by the normalizing condition sum(pi) = 1.
        A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
        rhs = np.zeros((size,))
        rhs[0] = 1
        pi, info = gmres(A, rhs, tol=tol)
        if info != 0:
            raise RuntimeError("gmres did not converge")
        self.pi = pi
def computePi(self,method='power'):
"""
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
"""
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)()
def printPi(self):
"""
Prints all states state and their steady state probabilities.
Not recommended for large state spaces.
"""
assert self.pi is not None, "Calculate pi before calling printPi()"
assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
for key,state in self.mapping.items():
print(state,self.pi[key])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.convertToProbabilityMatrix | python | def convertToProbabilityMatrix(self, Q):
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l) | Converts the initial matrix to a probability matrix
We calculate P = I + Q/l, with l the largest diagonal element.
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L351-L363 | null | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
self.P = P
self.direct = direct
self.pi = None #steady state probability vector
self.mapping = {} #mapping used to identify states
self.initialState = None #a dummy initial state
@property
def size(self):
"""
Return the number of states in the state space, if ``self.mapping`` is defined.
"""
return len(self.mapping)
def statespace(self):
"""
To be provided by the subclass. Return the state space
in an integer 2d numpy array with a state on each row.
"""
raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
"""
To be provided by the subclass.
Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
Ensure that unique states are returned.
"""
raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
def checkTransitionType(self,state):
"""
Check whether the transition function returns output of the correct types.
This can be either a dictionary with as keys ints/tuples and values floats.
Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates.
"""
test = self.transition(state)
assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
if isinstance(test,dict):
assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
usesNumpy=False
if isinstance(test,tuple):
assert len(test)==2, "The transition function should return two variables: states and rates."
states,rates = test
assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
usesNumpy = True
return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
def indirectInitialMatrix(self, initialState):
"""
Given some initial state, this iteratively determines new states.
We repeatedly call the transition function on unvisited states in the frontier set.
Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.
"""
mapping = {}
rates = OrderedDict()
#Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
convertedState = self.checkInitialState(initialState)
if isinstance(convertedState,set):
#If initialstates is a set, include all states in the set in the mapping.
frontier = set( convertedState )
for idx,state in enumerate(convertedState):
mapping[state] = idx
if idx == 0: #Test the return type of the transition function (dict or numpy).
usesNumpy = self.checkTransitionType(initialState)
else:
#Otherwise include only the single state.
frontier = set( [convertedState] )
usesNumpy = self.checkTransitionType(initialState)
mapping[convertedState] = 0
while len(frontier) > 0:
fromstate = frontier.pop()
fromindex = mapping[fromstate]
if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
transitions = self.transition(np.array(fromstate))
transitions = self.convertToTransitionDict(transitions)
else:
transitions = self.transition(fromstate)
for tostate,rate in transitions.items():
if tostate not in mapping:
frontier.add(tostate)
mapping[tostate] = len(mapping)
toindex = mapping[tostate]
rates[(fromindex, toindex)] = rate
#Inverse the keys and values in mapping to get a dictionary with indices and states.
self.mapping = {value: key for key, value in list(mapping.items())}
#Use the `rates` dictionary to fill a sparse dok matrix.
D = dok_matrix((self.size,self.size))
D.update(rates)
return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
def directInitialMatrix(self):
        """
        We generate an initial sparse matrix with all the transition rates (or probabilities).
        We later transform this matrix into a rate or probability matrix depending on the preferred method of obtaining pi.

        Returns a scipy.sparse.csr_matrix of shape (size, size) whose entry
        (i, j) holds the raw rate/probability of moving from state i to state j.
        Side effect: setStateCodes() fills self.codes and self.mapping first.
        """
        #First initialize state codes and the mapping with states.
        self.setStateCodes()
        #For each state, calculate the indices of reached states and rates using the transition function.
        #NOTE(review): `imap` presumably comes from the module header (itertools.imap on Py2, map on Py3) -- confirm.
        results = imap(self.transitionStates, self.mapping.values())
        #Simpler alternative that uses less memory.
        #Would be competitive if the conversion from dok to csr is faster.
        # D = dok_matrix((self.size,self.size),dtype=float)
        # for index,(col,rate) in enumerate(results):
        #     D.update({(index,c): r for c,r in zip(col,rate)})
        # return D.tocsr()
        #preallocate memory for the rows, cols and rates of the sparse matrix
        rows = np.empty(self.size,dtype=int)
        cols = np.empty(self.size,dtype=int)
        rates = np.empty(self.size,dtype=float)
        #now fill the arrays with the results, increasing their size if current memory is too small.
        right = 0
        for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
            left = right
            right += len(col)
            if right >= len(cols):
                #Grow all three buffers by ~50%. ndarray.resize is in-place and
                #requires that no other references to these arrays exist.
                new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
                cols.resize(new_capacity)
                rates.resize(new_capacity)
                rows.resize(new_capacity)
            rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
            cols[left:right] = col
            rates[left:right] = rate
        #Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
        #Duplicate (row, col) pairs are summed during the coo->csr conversion.
        return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
    """Turn the raw transition matrix `Q` into a proper rate matrix by
    subtracting each row sum on the diagonal so every row sums to zero."""
    totals = Q.sum(axis=1).getA1()
    n = Q.shape[0]
    positions = np.arange(n)
    diagonal = coo_matrix((totals, (positions, positions)), shape=Q.shape).tocsr()
    return Q - diagonal
def assertSingleClass(self,P):
    """Verify that the chain described by `P` is a single (weakly) connected
    communicating class; otherwise the steady-state distribution is not
    well defined and an AssertionError is raised."""
    numClasses, _ = csgraph.connected_components(P, directed=True, connection='weak')
    assert numClasses==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %numClasses
def getTransitionMatrix(self,probabilities=True):
    """Return the chain's transition matrix in csr form.

    A user-supplied ``self.P`` (dense or sparse) is normalized to csr;
    otherwise ``P`` is built with the direct or indirect method. The result
    is a probability matrix by default, or a rate matrix when
    ``probabilities`` is False.
    """
    if self.P is not None:
        # Reuse the supplied matrix, converting to csr where needed.
        if isspmatrix(self.P):
            if not isspmatrix_csr(self.P):
                self.P = self.P.tocsr()
        else:
            assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
            self.P = csr_matrix(self.P)
    elif self.direct == True:
        self.P = self.directInitialMatrix()
    else:
        self.P = self.indirectInitialMatrix(self.initialState)
    if probabilities:
        return self.convertToProbabilityMatrix(self.P)
    return self.convertToRateMatrix(self.P)
def getIrreducibleTransitionMatrix(self,probabilities=True):
    """Return the transition matrix after asserting the chain is a single
    communicating class.

    Bug fix: forward the ``probabilities`` flag. Previously the call
    hard-coded ``probabilities=True``, so callers requesting a rate matrix
    (e.g. eigenMethod) silently received a probability matrix instead.
    """
    P = self.getTransitionMatrix(probabilities=probabilities)
    self.assertSingleClass(P)
    return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
    """Approximate `pi` by power iteration on the probability matrix.

    Repeatedly multiplies the candidate vector by P (two multiplications
    per loop) until the L1-change falls below ``tol`` or ``maxiter``
    iterations have run; the result is stored in ``self.pi``. Robust even
    for very large state spaces.
    """
    transposed = self.getIrreducibleTransitionMatrix().T  # transpose once so each step is a single dot product
    dim = transposed.shape[0]
    current = np.zeros(dim)
    current[0] = 1.0
    previous = np.zeros(dim)
    delta = norm(current - previous, 1)
    step = 0
    while delta > tol and step < maxiter:
        previous = transposed.dot(current)
        current = transposed.dot(previous)
        delta = norm(current - previous, 1)
        step += 1
    self.pi = current
def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
    """Compute `pi` from the rate matrix via its eigenvector for the
    eigenvalue nearest zero; result stored in ``self.pi``.

    Uses closed-form answers for 1x1 and 2x2 chains and the sparse
    shift-invert ``eigs`` solver otherwise. Starts the iteration from a
    vector of ones; may struggle on very large state spaces.
    """
    Q = self.getIrreducibleTransitionMatrix(probabilities=False)
    if Q.shape == (1, 1):
        self.pi = np.array([1.0])
    elif Q.shape == (2, 2):
        # Two-state chain: pi is proportional to the opposing rates.
        total = Q[0, 1] + Q[1, 0]
        self.pi = np.array([Q[1, 0], Q[0, 1]] / total)
    else:
        dim = Q.shape[0]
        start = np.ones(dim, dtype=float)
        _, vectors = eigs(Q.T, k=1, v0=start, sigma=1e-6, which='LM', tol=tol, maxiter=maxiter)
        stationary = vectors[:, 0].real
        self.pi = stationary / stationary.sum()
def linearMethod(self):
    """Solve the stationary equations pi*(P-I)=0 with sum(pi)=1 exactly via
    a sparse direct solve; result stored in ``self.pi``. May exhaust memory
    for very large state spaces."""
    P = self.getIrreducibleTransitionMatrix()
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    dim = P.shape[0]
    balance = (P - eye(dim)).T
    # Swap the first balance equation for the normalization constraint.
    system = vstack([np.ones(dim), balance[1:, :]]).tocsr()
    target = np.zeros(dim)
    target[0] = 1.0
    self.pi = spsolve(system, target)
def krylovMethod(self,tol=1e-8):
    """Solve the stationary equations with the GMRES Krylov-subspace solver;
    result stored in ``self.pi``.

    tol -- relative residual tolerance for GMRES.
    Raises RuntimeError when GMRES fails to converge. May not always find a
    solution on very large state spaces.
    """
    P = self.getIrreducibleTransitionMatrix()
    #if P consists of one element, then set self.pi = 1.0
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    size = P.shape[0]
    dP = P - eye(size)
    #Replace the first equation by the normalizing condition.
    A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
    rhs = np.zeros((size,))
    rhs[0] = 1
    # SciPy renamed gmres's `tol` keyword to `rtol` (removed in SciPy 1.14);
    # try the modern spelling first and fall back for older versions.
    try:
        pi, info = gmres(A, rhs, rtol=tol, atol=0.0)
    except TypeError:
        pi, info = gmres(A, rhs, tol=tol)
    if info != 0:
        raise RuntimeError("gmres did not converge")
    self.pi = pi
def computePi(self,method='power'):
    """Compute the steady-state distribution with the chosen method and
    store it in ``self.pi``.

    method -- one of 'power' (default, most robust), 'eigen', 'linear',
    'krylov'; anything else raises AssertionError.
    """
    methodSet = ['power','eigen','linear','krylov']
    assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
    solver = getattr(self, method + 'Method')
    return solver()
def printPi(self):
    """Print each state with its steady-state probability (only sensible for
    small state spaces built via the direct/indirect method)."""
    assert self.pi is not None, "Calculate pi before calling printPi()"
    assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
    for index in self.mapping:
        print(self.mapping[index], self.pi[index])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.assertSingleClass | python | def assertSingleClass(self,P):
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components | Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L365-L371 | null | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
        """Store the optional transition matrix and the construction mode.

        P -- optional dense or sparse transition/rate matrix; when None it is
             derived later by getTransitionMatrix().
        direct -- when True (and P is None) build P from statespace(); when
             False explore the chain starting from `initialState`.
        """
        self.P = P
        self.direct = direct
        self.pi = None #steady state probability vector
        self.mapping = {} #mapping used to identify states
        self.initialState = None #a dummy initial state
@property
def size(self):
    """Number of states currently known to the chain (the length of the
    index->state mapping built by the direct/indirect method)."""
    return len(self.mapping)
def statespace(self):
    """Abstract hook: subclasses must return the complete state space as a
    2d integer numpy array with one state per row."""
    raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
    """Abstract hook: subclasses must return the states reachable from
    `state` and their rates — either as (2d int array, 1d rate array), or
    (for the indirect method) as a {state: rate} dict with unique keys."""
    raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
    """Validate the type of `initialState` and normalize it.

    Accepts an int, list, tuple, 1-d integer numpy array, or a set of
    ints/tuples. Length-1 sequences collapse to a plain int, longer ones
    become tuples, and sets are returned unchanged.

    Bug fix: length-1 lists/tuples previously did ``int(initialState)``,
    which raises TypeError; we now convert the single element instead.

    Raises AssertionError when the state (or any element) is not integer.
    """
    assert initialState is not None, "Initial state has not been specified."
    assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
    if isinstance(initialState,list):
        #Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
        assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
        initialState = int(initialState[0]) if len(initialState)==1 else tuple(initialState)
    elif isinstance(initialState,tuple):
        #Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
        assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
        if len(initialState)==1:
            initialState = int(initialState[0])
    elif isinstance(initialState,np.ndarray):
        #Check whether the state is a 1d integer numpy array. Return an int if it has length 1.
        assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
        initialState = int(initialState[0]) if len(initialState)==1 else tuple(initialState)
    elif isinstance(initialState,set):
        #If we have a set, then check whether all elements are ints or tuples.
        for state in initialState:
            assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
            if isinstance(state,tuple):
                assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
    return initialState
def checkTransitionType(self,state):
    """Probe the transition function once and validate its return format.

    Accepts either a {state: rate} dict with int/tuple keys and float
    values, or a (states, rates) pair of numpy arrays. Returns True when
    the numpy form is used, False for the dict form; raises AssertionError
    on anything else.
    """
    sample = self.transition(state)
    assert isinstance(sample,(dict,tuple)), "Transition function does not return a dict or tuple"
    usesNumpy = False
    if isinstance(sample,dict):
        for st in sample:
            assert isinstance(st, (int,tuple)), "Transition function returns a dict, but states are not represented as tuples or integers"
        for rt in sample.values():
            assert isinstance(rt, float), "Transition function returns a dict, but the rates should be floats."
    else:
        assert len(sample)==2, "The transition function should return two variables: states and rates."
        states,rates = sample
        assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
        assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
        usesNumpy = True
    return usesNumpy
def convertToTransitionDict(self,transitions):
    """Collapse a (states, rates) numpy pair into a defaultdict(float) keyed
    by int (for 1-d states) or tuple, accumulating rates of duplicates."""
    reached, rates = transitions
    accumulated = defaultdict(float)
    scalarStates = reached.shape[1] == 1
    for pos in range(len(reached)):
        key = int(reached[pos]) if scalarStates else tuple(reached[pos])
        accumulated[key] += rates[pos]
    return accumulated
def indirectInitialMatrix(self, initialState):
        """
        Given some initial state, this iteratively determines new states.
        We repeatedly call the transition function on unvisited states in the frontier set.
        Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.

        initialState -- int/list/tuple/1d int array, or a set of ints/tuples
        (validated and normalized by checkInitialState).
        Returns a scipy.sparse.csr_matrix with one row/column per reachable state.
        Side effect: fills self.mapping with {index: state}.
        """
        mapping = {}
        rates = OrderedDict()
        #Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
        convertedState = self.checkInitialState(initialState)
        if isinstance(convertedState,set):
            #If initialstates is a set, include all states in the set in the mapping.
            frontier = set( convertedState )
            for idx,state in enumerate(convertedState):
                mapping[state] = idx
                if idx == 0: #Test the return type of the transition function (dict or numpy).
                    usesNumpy = self.checkTransitionType(initialState)
        else:
            #Otherwise include only the single state.
            frontier = set( [convertedState] )
            usesNumpy = self.checkTransitionType(initialState)
            mapping[convertedState] = 0
        #Exhaustively expand unvisited states; each new state gets the next
        #free index, so indices are assigned in discovery order.
        while len(frontier) > 0:
            fromstate = frontier.pop()
            fromindex = mapping[fromstate]
            if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
                transitions = self.transition(np.array(fromstate))
                transitions = self.convertToTransitionDict(transitions)
            else:
                transitions = self.transition(fromstate)
            for tostate,rate in transitions.items():
                if tostate not in mapping:
                    frontier.add(tostate)
                    mapping[tostate] = len(mapping)
                toindex = mapping[tostate]
                rates[(fromindex, toindex)] = rate
        #Inverse the keys and values in mapping to get a dictionary with indices and states.
        self.mapping = {value: key for key, value in list(mapping.items())}
        #Use the `rates` dictionary to fill a sparse dok matrix.
        D = dok_matrix((self.size,self.size))
        D.update(rates)
        return D.tocsr()
def getStateCode(self,state):
    """Map a state (or an array of states) to its unique integer code: shift
    so every component is nonnegative, then take the inner product with the
    positional base vector ``self.statecode``."""
    shifted = state - self.minvalues
    return np.dot(shifted, self.statecode)
def setStateCodes(self):
    """Build sorted integer codes for all states plus the index->state
    ``self.mapping`` used to address the transition matrix.

    Bug fix: the non-unique-code path executed ``raise <str>``, which is
    itself a TypeError in Python 3; it now raises ValueError.
    """
    #calculate the statespace and determine the minima and maxima each element in the state vector
    statespace = self.statespace()
    self.minvalues = np.amin(statespace,axis=0)
    self.maxvalues = np.amax(statespace,axis=0)
    #calculate the largest number of values and create a state code
    statesize = statespace.shape[1]
    largestRange = 1+np.max(self.maxvalues-self.minvalues)
    self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
    #Calculate the codes, sort them, and store them in self.codes
    codes = self.getStateCode(statespace)
    sorted_indices = np.argsort(codes)
    self.codes = codes[sorted_indices]
    if np.unique(self.codes).shape != self.codes.shape:
        raise ValueError("Non-unique coding of states, results are unreliable")
    #For the end results, it is useful to put the indices and corresponding states in a dictionary
    mapping = OrderedDict()
    for index,state in enumerate(statespace[sorted_indices]):
        mapping[index] = state
    self.mapping = mapping
def getStateIndex(self,state):
    """Return the row index of one or more states by binary search on the
    sorted state codes (works element-wise on arrays of states)."""
    code = self.getStateCode(state)
    return np.searchsorted(self.codes, code).astype(int)
def transitionStates(self,state):
    """Expand `state` with the transition function and translate the reached
    states into matrix indices; returns (indices, rates)."""
    reached, rates = self.transition(state)
    return self.getStateIndex(reached), rates
def directInitialMatrix(self):
        """
        We generate an initial sparse matrix with all the transition rates (or probabilities).
        We later transform this matrix into a rate or probability matrix depending on the preferred method of obtaining pi.

        Returns a scipy.sparse.csr_matrix of shape (size, size) whose entry
        (i, j) holds the raw rate/probability of moving from state i to state j.
        Side effect: setStateCodes() fills self.codes and self.mapping first.
        """
        #First initialize state codes and the mapping with states.
        self.setStateCodes()
        #For each state, calculate the indices of reached states and rates using the transition function.
        #NOTE(review): `imap` presumably comes from the module header (itertools.imap on Py2, map on Py3) -- confirm.
        results = imap(self.transitionStates, self.mapping.values())
        #Simpler alternative that uses less memory.
        #Would be competitive if the conversion from dok to csr is faster.
        # D = dok_matrix((self.size,self.size),dtype=float)
        # for index,(col,rate) in enumerate(results):
        #     D.update({(index,c): r for c,r in zip(col,rate)})
        # return D.tocsr()
        #preallocate memory for the rows, cols and rates of the sparse matrix
        rows = np.empty(self.size,dtype=int)
        cols = np.empty(self.size,dtype=int)
        rates = np.empty(self.size,dtype=float)
        #now fill the arrays with the results, increasing their size if current memory is too small.
        right = 0
        for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
            left = right
            right += len(col)
            if right >= len(cols):
                #Grow all three buffers by ~50%. ndarray.resize is in-place and
                #requires that no other references to these arrays exist.
                new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
                cols.resize(new_capacity)
                rates.resize(new_capacity)
                rows.resize(new_capacity)
            rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
            cols[left:right] = col
            rates[left:right] = rate
        #Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
        #Duplicate (row, col) pairs are summed during the coo->csr conversion.
        return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
    """Turn the raw transition matrix `Q` into a proper rate matrix by
    subtracting each row sum on the diagonal so every row sums to zero."""
    totals = Q.sum(axis=1).getA1()
    n = Q.shape[0]
    positions = np.arange(n)
    diagonal = coo_matrix((totals, (positions, positions)), shape=Q.shape).tocsr()
    return Q - diagonal
def convertToProbabilityMatrix(self, Q):
    """Turn the raw matrix into a probability matrix via uniformization:
    P = I + Q/l with l slightly above the largest row sum, which also adds a
    small diagonal mass that rules out periodicity."""
    totals = Q.sum(axis=1).getA1()
    l = np.max(totals)*1.00001
    positions = np.arange(Q.shape[0])
    leftover = 1. - totals/l
    diagonal = coo_matrix((leftover, (positions, positions)), shape=Q.shape).tocsr()
    return diagonal + Q.multiply(1./l)
def getTransitionMatrix(self,probabilities=True):
    """Return the chain's transition matrix in csr form.

    A user-supplied ``self.P`` (dense or sparse) is normalized to csr;
    otherwise ``P`` is built with the direct or indirect method. The result
    is a probability matrix by default, or a rate matrix when
    ``probabilities`` is False.
    """
    if self.P is not None:
        # Reuse the supplied matrix, converting to csr where needed.
        if isspmatrix(self.P):
            if not isspmatrix_csr(self.P):
                self.P = self.P.tocsr()
        else:
            assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
            self.P = csr_matrix(self.P)
    elif self.direct == True:
        self.P = self.directInitialMatrix()
    else:
        self.P = self.indirectInitialMatrix(self.initialState)
    if probabilities:
        return self.convertToProbabilityMatrix(self.P)
    return self.convertToRateMatrix(self.P)
def getIrreducibleTransitionMatrix(self,probabilities=True):
    """Return the transition matrix after asserting the chain is a single
    communicating class.

    Bug fix: forward the ``probabilities`` flag. Previously the call
    hard-coded ``probabilities=True``, so callers requesting a rate matrix
    (e.g. eigenMethod) silently received a probability matrix instead.
    """
    P = self.getTransitionMatrix(probabilities=probabilities)
    self.assertSingleClass(P)
    return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
    """Approximate `pi` by power iteration on the probability matrix.

    Repeatedly multiplies the candidate vector by P (two multiplications
    per loop) until the L1-change falls below ``tol`` or ``maxiter``
    iterations have run; the result is stored in ``self.pi``. Robust even
    for very large state spaces.
    """
    transposed = self.getIrreducibleTransitionMatrix().T  # transpose once so each step is a single dot product
    dim = transposed.shape[0]
    current = np.zeros(dim)
    current[0] = 1.0
    previous = np.zeros(dim)
    delta = norm(current - previous, 1)
    step = 0
    while delta > tol and step < maxiter:
        previous = transposed.dot(current)
        current = transposed.dot(previous)
        delta = norm(current - previous, 1)
        step += 1
    self.pi = current
def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
    """Compute `pi` from the rate matrix via its eigenvector for the
    eigenvalue nearest zero; result stored in ``self.pi``.

    Uses closed-form answers for 1x1 and 2x2 chains and the sparse
    shift-invert ``eigs`` solver otherwise. Starts the iteration from a
    vector of ones; may struggle on very large state spaces.
    """
    Q = self.getIrreducibleTransitionMatrix(probabilities=False)
    if Q.shape == (1, 1):
        self.pi = np.array([1.0])
    elif Q.shape == (2, 2):
        # Two-state chain: pi is proportional to the opposing rates.
        total = Q[0, 1] + Q[1, 0]
        self.pi = np.array([Q[1, 0], Q[0, 1]] / total)
    else:
        dim = Q.shape[0]
        start = np.ones(dim, dtype=float)
        _, vectors = eigs(Q.T, k=1, v0=start, sigma=1e-6, which='LM', tol=tol, maxiter=maxiter)
        stationary = vectors[:, 0].real
        self.pi = stationary / stationary.sum()
def linearMethod(self):
    """Solve the stationary equations pi*(P-I)=0 with sum(pi)=1 exactly via
    a sparse direct solve; result stored in ``self.pi``. May exhaust memory
    for very large state spaces."""
    P = self.getIrreducibleTransitionMatrix()
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    dim = P.shape[0]
    balance = (P - eye(dim)).T
    # Swap the first balance equation for the normalization constraint.
    system = vstack([np.ones(dim), balance[1:, :]]).tocsr()
    target = np.zeros(dim)
    target[0] = 1.0
    self.pi = spsolve(system, target)
def krylovMethod(self,tol=1e-8):
    """Solve the stationary equations with the GMRES Krylov-subspace solver;
    result stored in ``self.pi``.

    tol -- relative residual tolerance for GMRES.
    Raises RuntimeError when GMRES fails to converge. May not always find a
    solution on very large state spaces.
    """
    P = self.getIrreducibleTransitionMatrix()
    #if P consists of one element, then set self.pi = 1.0
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    size = P.shape[0]
    dP = P - eye(size)
    #Replace the first equation by the normalizing condition.
    A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
    rhs = np.zeros((size,))
    rhs[0] = 1
    # SciPy renamed gmres's `tol` keyword to `rtol` (removed in SciPy 1.14);
    # try the modern spelling first and fall back for older versions.
    try:
        pi, info = gmres(A, rhs, rtol=tol, atol=0.0)
    except TypeError:
        pi, info = gmres(A, rhs, tol=tol)
    if info != 0:
        raise RuntimeError("gmres did not converge")
    self.pi = pi
def computePi(self,method='power'):
    """Compute the steady-state distribution with the chosen method and
    store it in ``self.pi``.

    method -- one of 'power' (default, most robust), 'eigen', 'linear',
    'krylov'; anything else raises AssertionError.
    """
    methodSet = ['power','eigen','linear','krylov']
    assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
    solver = getattr(self, method + 'Method')
    return solver()
    def printPi(self):
        """
        Print all states and their steady state probabilities.
        Not recommended for large state spaces.
        """
        assert self.pi is not None, "Calculate pi before calling printPi()"
        assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
        # `mapping` maps matrix index -> state, so pi[key] is that state's probability.
        for key,state in self.mapping.items():
            print(state,self.pi[key])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.getTransitionMatrix | python | def getTransitionMatrix(self,probabilities=True):
if self.P is not None:
if isspmatrix(self.P):
if not isspmatrix_csr(self.P):
self.P = self.P.tocsr()
else:
assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
self.P = csr_matrix(self.P)
elif self.direct == True:
self.P = self.directInitialMatrix()
else:
self.P = self.indirectInitialMatrix(self.initialState)
if probabilities:
P = self.convertToProbabilityMatrix(self.P)
else:
P = self.convertToRateMatrix(self.P)
return P | If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L373-L399 | [
"def indirectInitialMatrix(self, initialState):\n \"\"\"\n Given some initial state, this iteratively determines new states.\n We repeatedly call the transition function on unvisited states in the frontier set.\n Each newly visited state is put in a dictionary called 'mapping' and the rates are stored i... | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
    def __init__(self,P=None,direct=False):
        """
        Store the optional transition matrix and the construction mode.

        Parameters
        ----------
        P : array(float,ndim=2) or sparse matrix, optional(default=None)
            Transition matrix of the Markov chain; derived automatically when omitted.
        direct : bool, optional(default=False)
            When ``P`` is absent, use the direct method (full statespace) instead of
            the indirect (reachability) method.
        """
        self.P = P
        self.direct = direct
        self.pi = None #steady state probability vector
        self.mapping = {} #mapping used to identify states
        self.initialState = None #a dummy initial state
@property
def size(self):
"""
Return the number of states in the state space, if ``self.mapping`` is defined.
"""
return len(self.mapping)
    def statespace(self):
        """
        To be provided by the subclass. Return the complete state space
        as an integer 2d numpy array with one state per row. Required
        only when using the direct method.
        """
        raise NotImplementedError('Implement the function statespace() in the subclass')
    def transition(self, state):
        """
        To be provided by the subclass.
        Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
        For the iterative method, it is also allowed to return a dictionary where the keys are
        tuples (or ints) with the state and the values are the transition rates.
        Ensure that unique states are returned.
        """
        raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
    def checkTransitionType(self,state):
        """
        Check whether the transition function returns output of the correct types.
        Valid outputs are either a dict mapping int/tuple states to float rates,
        or a tuple (2d integer numpy array of states, 1d numpy array of rates).

        Returns
        -------
        bool
            True when the transition function returns numpy arrays, False for a dict.
        """
        test = self.transition(state)
        assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
        if isinstance(test,dict):
            assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
            assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
            usesNumpy=False
        if isinstance(test,tuple):
            assert len(test)==2, "The transition function should return two variables: states and rates."
            states,rates = test
            assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
            assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
            usesNumpy = True
        return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
    def indirectInitialMatrix(self, initialState):
        """
        Given some initial state, this iteratively determines new states.
        We repeatedly call the transition function on unvisited states in the frontier set.
        Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.

        Returns
        -------
        scipy.sparse.csr_matrix
            Raw (unnormalized) transition matrix over the reachable states.
        """
        mapping = {}
        rates = OrderedDict()

        #Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
        convertedState = self.checkInitialState(initialState)

        if isinstance(convertedState,set):
            #If initialstates is a set, include all states in the set in the mapping.
            frontier = set( convertedState )
            for idx,state in enumerate(convertedState):
                mapping[state] = idx
                if idx == 0: #Test the return type of the transition function (dict or numpy).
                    usesNumpy = self.checkTransitionType(initialState)
        else:
            #Otherwise include only the single state.
            frontier = set( [convertedState] )
            usesNumpy = self.checkTransitionType(initialState)
            mapping[convertedState] = 0

        # Breadth-style exploration: pop a state, record transitions, enqueue unseen targets.
        while len(frontier) > 0:
            fromstate = frontier.pop()
            fromindex = mapping[fromstate]

            if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
                transitions = self.transition(np.array(fromstate))
                transitions = self.convertToTransitionDict(transitions)
            else:
                transitions = self.transition(fromstate)

            for tostate,rate in transitions.items():
                if tostate not in mapping:
                    frontier.add(tostate)
                    # New states get the next free index, so indices are dense 0..n-1.
                    mapping[tostate] = len(mapping)
                toindex = mapping[tostate]
                rates[(fromindex, toindex)] = rate

        #Inverse the keys and values in mapping to get a dictionary with indices and states.
        self.mapping = {value: key for key, value in list(mapping.items())}

        #Use the `rates` dictionary to fill a sparse dok matrix.
        D = dok_matrix((self.size,self.size))
        D.update(rates)
        return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
    def transitionStates(self,state):
        """
        Return the indices of new states and their rates.

        Thin wrapper combining the subclass :func:`transition` with
        :func:`getStateIndex`, used by the direct method.
        """
        newstates,rates = self.transition(state)
        newindices = self.getStateIndex(newstates)
        return newindices,rates
    def directInitialMatrix(self):
        """
        We generate an initial sparse matrix with all the transition rates (or probabilities).
        We later transform this matrix into a rate or probability matrix depending on the preferred method of obtaining pi.

        Returns
        -------
        scipy.sparse.csr_matrix
            Raw (unnormalized) transition matrix over the full state space.
        """
        #First initialize state codes and the mapping with states.
        self.setStateCodes()

        #For each state, calculate the indices of reached states and rates using the transition function.
        # NOTE(review): `imap` is the Python 2 lazy map; presumably aliased at module level — confirm.
        results = imap(self.transitionStates, self.mapping.values())

        #Simpler alternative that uses less memory.
        #Would be competitive if the conversion from dok to csr is faster.
#        D = dok_matrix((self.size,self.size),dtype=float)
#        for index,(col,rate) in enumerate(results):
#            D.update({(index,c): r for c,r in zip(col,rate)})
#        return D.tocsr()

        #preallocate memory for the rows, cols and rates of the sparse matrix
        rows = np.empty(self.size,dtype=int)
        cols = np.empty(self.size,dtype=int)
        rates = np.empty(self.size,dtype=float)

        #now fill the arrays with the results, increasing their size if current memory is too small.
        right = 0
        for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
            left = right
            right += len(col)
            if right >= len(cols):
                new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
                cols.resize(new_capacity)
                rates.resize(new_capacity)
                rows.resize(new_capacity)
            # Since states are sorted by code, `index` corresponds to the source state.
            rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
            cols[left:right] = col
            rates[left:right] = rate

        #Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
        return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag
    def convertToProbabilityMatrix(self, Q):
        """
        Converts the initial matrix to a probability matrix.
        We calculate P = I + Q/l, with l the largest diagonal element.
        Even if Q is already a probability matrix, this step helps for numerical stability.
        By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
        """
        rowSums = Q.sum(axis=1).getA1()
        # Scale slightly above the largest row sum so every diagonal entry stays positive,
        # which breaks periodicity of the chain.
        l = np.max(rowSums)*1.00001
        diagonalElements = 1.-rowSums/l
        idxRange = np.arange(Q.shape[0])
        Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
        return Qdiag+Q.multiply(1./l)
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
    def getTransitionMatrix(self,probabilities=True):
        """
        If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
        Otherwise, we will generate it using the direct or indirect method.
        Since most solution methods use a probability matrix, this is the default setting.
        By setting probabilities=False we can also return a rate matrix.

        Side effect: caches the raw matrix in ``self.P`` as a csr matrix.
        """
        if self.P is not None:
            if isspmatrix(self.P):
                # Already sparse; only ensure csr format.
                if not isspmatrix_csr(self.P):
                    self.P = self.P.tocsr()
            else:
                assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
                self.P = csr_matrix(self.P)
        elif self.direct == True:
            self.P = self.directInitialMatrix()
        else:
            self.P = self.indirectInitialMatrix(self.initialState)

        # Normalize the cached raw matrix into the requested form.
        if probabilities:
            P = self.convertToProbabilityMatrix(self.P)
        else:
            P = self.convertToRateMatrix(self.P)

        return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
#Gets the transitionmatrix and assert that it consists of a single irreducible class.
P = self.getTransitionMatrix(probabilities=True)
self.assertSingleClass(P)
return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
"""
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi
    def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
        """
        Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
        The result is stored in the class attribute ``pi``.

        Parameters
        ----------
        tol : float, optional(default=1e-8)
            Tolerance level for the precision of the end result. A lower tolerance leads to a more accurate estimate of ``pi``.
        maxiter : int, optional(default=1e5)
            The maximum number of iterations to be carried out.

        Remarks
        -------
        The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
        Here we let the initial ``pi`` be a vector of ones.
        For large state spaces, this method may not work well.
        Chains of size 1 and 2 are handled by closed-form shortcuts.
        Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
        """
        # Work on the rate matrix: pi solves pi*Q = 0.
        Q = self.getIrreducibleTransitionMatrix(probabilities=False)
        if Q.shape == (1, 1):
            self.pi = np.array([1.0])
            return
        if Q.shape == (2, 2):
            # Two-state chain: pi is proportional to the opposing transition rates.
            self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
            return
        size = Q.shape[0]
        guess = np.ones(size,dtype=float)
        # sigma close to 0 targets the eigenvalue of Q^T nearest zero (shift-invert mode).
        w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
        pi = v[:, 0].real
        pi /= pi.sum()  # normalize the eigenvector into a probability distribution
        self.pi = pi
def linearMethod(self):
"""
Determines ``pi`` by solving a system of linear equations using :func:`spsolve`.
The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.linearMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
For large state spaces, the linear algebra solver may not work well due to memory overflow.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
self.pi = spsolve(A, rhs)
def krylovMethod(self,tol=1e-8):
"""
We obtain ``pi`` by using the :func:``gmres`` solver for the system of linear equations.
It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.krylovMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
Remarks
-------
For large state spaces, this method may not always give a solution.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
pi, info = gmres(A, rhs, tol=tol)
if info != 0:
raise RuntimeError("gmres did not converge")
self.pi = pi
def computePi(self,method='power'):
"""
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
"""
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)()
def printPi(self):
"""
Prints all states state and their steady state probabilities.
Not recommended for large state spaces.
"""
assert self.pi is not None, "Calculate pi before calling printPi()"
assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
for key,state in self.mapping.items():
print(state,self.pi[key])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.powerMethod | python | def powerMethod(self, tol = 1e-8, maxiter = 1e5):
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi | Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L407-L441 | [
"def getIrreducibleTransitionMatrix(self,probabilities=True):\n #Gets the transitionmatrix and assert that it consists of a single irreducible class.\n P = self.getTransitionMatrix(probabilities=True)\n self.assertSingleClass(P) \n return P\n"
] | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
self.P = P
self.direct = direct
self.pi = None #steady state probability vector
self.mapping = {} #mapping used to identify states
self.initialState = None #a dummy initial state
@property
def size(self):
"""
Return the number of states in the state space, if ``self.mapping`` is defined.
"""
return len(self.mapping)
def statespace(self):
"""
To be provided by the subclass. Return the state space
in an integer 2d numpy array with a state on each row.
"""
raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
"""
To be provided by the subclass.
Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
Ensure that unique states are returned.
"""
raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
def checkTransitionType(self, state):
    """
    Validate the output type of the transition function.

    Accepted forms are a dict mapping int/tuple states to float rates, or
    a pair (states, rates) with a 2d integer numpy array of states and a
    1d numpy array of rates. Returns True when the numpy representation
    is used and False for the dict representation.
    """
    sample = self.transition(state)
    assert isinstance(sample, (dict, tuple)), "Transition function does not return a dict or tuple"
    if isinstance(sample, dict):
        for tostate, rate in sample.items():
            assert isinstance(tostate, (int, tuple)), "Transition function returns a dict, but states are not represented as tuples or integers"
            assert isinstance(rate, float), "Transition function returns a dict, but the rates should be floats."
        return False
    assert len(sample) == 2, "The transition function should return two variables: states and rates."
    states, rates = sample
    assert isinstance(states, np.ndarray) and states.ndim == 2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" % states
    assert isinstance(rates, np.ndarray) and rates.ndim == 1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
    return True
def convertToTransitionDict(self, transitions):
    """
    Convert numpy-style transition output into a dict keyed by state.

    Rates of duplicate destination states are accumulated. Rows of a
    single-column state array become int keys, wider rows become tuple
    keys.
    """
    states, rates = transitions
    accumulated = defaultdict(float)
    keyfunc = int if states.shape[1] == 1 else tuple
    for state, rate in zip(states, rates):
        accumulated[keyfunc(state)] += rate
    return accumulated
def indirectInitialMatrix(self, initialState):
    """
    Build the sparse transition matrix by exploring states reachable from `initialState`.

    We repeatedly call the transition function on unvisited states in the
    frontier set. Each newly visited state is assigned the next free index
    in `mapping`, and the rates are collected keyed by
    (from_index, to_index). Side effect: sets `self.mapping` to
    index -> state. Returns the rates as a csr matrix.
    """
    mapping = {}  # state -> matrix index, in discovery order
    rates = OrderedDict()  # (from_index, to_index) -> rate
    # Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
    convertedState = self.checkInitialState(initialState)
    if isinstance(convertedState, set):
        # If initialState is a set, seed the frontier and mapping with every state in it.
        frontier = set(convertedState)
        for idx, state in enumerate(convertedState):
            mapping[state] = idx
            if idx == 0:  # Test the return type of the transition function (dict or numpy).
                usesNumpy = self.checkTransitionType(initialState)
    else:
        # Otherwise include only the single state.
        frontier = set([convertedState])
        usesNumpy = self.checkTransitionType(initialState)
        mapping[convertedState] = 0
    while len(frontier) > 0:
        fromstate = frontier.pop()
        fromindex = mapping[fromstate]
        if usesNumpy:  # If numpy is used, convert the output to a dictionary with states and rates.
            transitions = self.transition(np.array(fromstate))
            transitions = self.convertToTransitionDict(transitions)
        else:
            transitions = self.transition(fromstate)
        for tostate, rate in transitions.items():
            if tostate not in mapping:
                # First visit: assign the next index and schedule for expansion.
                frontier.add(tostate)
                mapping[tostate] = len(mapping)
            toindex = mapping[tostate]
            rates[(fromindex, toindex)] = rate
    # Inverse the keys and values in mapping to get a dictionary with indices and states.
    self.mapping = {value: key for key, value in list(mapping.items())}
    # Use the `rates` dictionary to fill a sparse dok matrix (self.size == len(self.mapping)).
    D = dok_matrix((self.size, self.size))
    D.update(rates)
    return D.tocsr()
def getStateCode(self, state):
    """
    Return the unique numeric code(s) for a state or an array of states.

    States are shifted to be nonnegative and interpreted as digits in a
    numeral system whose base is large enough that the inner product with
    `self.statecode` is unique per state.
    """
    shifted = state - self.minvalues
    return np.dot(shifted, self.statecode)
def setStateCodes(self):
    """
    Generate sorted codes for the states in the state space.

    The codes are used to quickly identify which states occur after a
    transition/action. Side effects: sets `minvalues`, `maxvalues`,
    `statecode`, `codes` (sorted) and `mapping` (index -> state, in code
    order).

    Raises
    ------
    ValueError
        If the state coding is not unique; results would be unreliable.
    """
    # Calculate the statespace and determine the extremes of each element in the state vector.
    statespace = self.statespace()
    self.minvalues = np.amin(statespace, axis=0)
    self.maxvalues = np.amax(statespace, axis=0)
    # Positional encoding in base `largestRange` guarantees a unique code per state.
    statesize = statespace.shape[1]
    largestRange = 1 + np.max(self.maxvalues - self.minvalues)
    self.statecode = np.power(largestRange, np.arange(statesize), dtype=int)
    # Calculate the codes, sort them, and store them in self.codes.
    codes = self.getStateCode(statespace)
    sorted_indices = np.argsort(codes)
    self.codes = codes[sorted_indices]
    if np.unique(self.codes).shape != self.codes.shape:
        # Bug fix: raising a plain string is a TypeError in Python 3; raise a proper exception.
        raise ValueError("Non-unique coding of states, results are unreliable")
    # For the end results, it is useful to put the indices and corresponding states in a dictionary.
    mapping = OrderedDict()
    for index, state in enumerate(statespace[sorted_indices]):
        mapping[index] = state
    self.mapping = mapping
def getStateIndex(self, state):
    """
    Look up the index of a state (or of multiple states at once).

    Computes the state code and locates it in the sorted `self.codes`
    array via binary search.
    """
    code = self.getStateCode(state)
    return np.searchsorted(self.codes, code).astype(int)
def transitionStates(self, state):
    """
    Return the matrix indices of the states reachable from `state`,
    together with the corresponding transition rates.
    """
    reachable, rates = self.transition(state)
    return self.getStateIndex(reachable), rates
def directInitialMatrix(self):
    """
    Build the initial sparse matrix with all transition rates over the full state space.

    Calls :func:`transitionStates` for every state in ``self.mapping``
    (populated by :func:`setStateCodes`) and assembles the results into a
    csr matrix. The triplet arrays start with one entry per state and are
    grown geometrically whenever they turn out to be too small.
    """
    # First initialize state codes and the mapping with states.
    self.setStateCodes()
    # For each state, calculate the indices of reached states and rates using the transition function.
    # NOTE(review): `imap` is presumably the lazy map from a py2/py3 compat import — confirm at file top.
    results = imap(self.transitionStates, self.mapping.values())
    # Simpler alternative that uses less memory.
    # Would be competitive if the conversion from dok to csr is faster.
    # D = dok_matrix((self.size,self.size),dtype=float)
    # for index,(col,rate) in enumerate(results):
    #     D.update({(index,c): r for c,r in zip(col,rate)})
    # return D.tocsr()
    # Preallocate memory for the rows, cols and rates of the sparse matrix.
    rows = np.empty(self.size, dtype=int)
    cols = np.empty(self.size, dtype=int)
    rates = np.empty(self.size, dtype=float)
    # Now fill the arrays with the results, increasing their size if current memory is too small.
    right = 0
    for index, (col, rate) in enumerate(results):  # more robust alternative: in izip(self.mapping.keys(),results)
        left = right
        right += len(col)
        if right >= len(cols):
            # Increase the allocated memory by ~50% if the vectors turn out to be too small.
            new_capacity = int(round(right * 1.5))
            cols.resize(new_capacity)
            rates.resize(new_capacity)
            rows.resize(new_capacity)
        rows[left:right] = index  # since states are sorted, the index indeed corresponds to the state.
        cols[left:right] = col
        rates[left:right] = rate
    # Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
    return coo_matrix((rates[:right], (rows[:right], cols[:right])), shape=(self.size, self.size)).tocsr()
def convertToRateMatrix(self, Q):
    """
    Turn the initial matrix into a generator (rate) matrix.

    Subtracts each row sum from the diagonal so that every row of the
    result sums to zero.
    """
    totals = Q.sum(axis=1).getA1()
    indices = np.arange(Q.shape[0])
    diagonal = coo_matrix((totals, (indices, indices)), shape=Q.shape).tocsr()
    return Q - diagonal
def convertToProbabilityMatrix(self, Q):
    """
    Turn the initial matrix into a probability matrix.

    Computes P = I + Q/l with l slightly above the largest row sum. Even
    if Q is already a probability matrix this step helps numerical
    stability, and the small extra diagonal mass (factor 1.00001)
    prevents periodicity.
    """
    totals = Q.sum(axis=1).getA1()
    l = np.max(totals) * 1.00001
    indices = np.arange(Q.shape[0])
    diagonal = coo_matrix((1. - totals / l, (indices, indices)), shape=Q.shape).tocsr()
    return diagonal + Q.multiply(1. / l)
def assertSingleClass(self, P):
    """
    Assert that the rate/probability matrix forms a single (weakly)
    connected communicating class.

    If this is not the case, the steady-state distribution is not well
    defined.
    """
    numClasses, _ = csgraph.connected_components(P, directed=True, connection='weak')
    assert numClasses == 1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." % numClasses
def getTransitionMatrix(self, probabilities=True):
    """
    Return the transition matrix as a sparse csr matrix.

    If ``self.P`` was supplied it is reused (converted to csr if needed);
    otherwise it is generated with the direct or indirect method. By
    default a probability matrix is returned; with ``probabilities=False``
    a rate matrix is returned instead.
    """
    if self.P is None:
        # Generate P from the transition function.
        self.P = self.directInitialMatrix() if self.direct == True else self.indirectInitialMatrix(self.initialState)
    elif isspmatrix(self.P):
        if not isspmatrix_csr(self.P):
            self.P = self.P.tocsr()
    else:
        assert isinstance(self.P, np.ndarray) and self.P.ndim == 2 and self.P.shape[0] == self.P.shape[1], 'P needs to be a 2d numpy array with an equal number of columns and rows'
        self.P = csr_matrix(self.P)
    if probabilities:
        return self.convertToProbabilityMatrix(self.P)
    return self.convertToRateMatrix(self.P)
def getIrreducibleTransitionMatrix(self, probabilities=True):
    """
    Return the transition matrix and assert that it consists of a single
    irreducible (communicating) class.

    Parameters
    ----------
    probabilities : bool, optional(default=True)
        If True return a probability matrix, otherwise a rate matrix.
    """
    # Bug fix: the `probabilities` argument used to be ignored (always True),
    # so callers asking for a rate matrix silently received a probability matrix.
    P = self.getTransitionMatrix(probabilities=probabilities)
    self.assertSingleClass(P)
    return P
def powerMethod(self, tol=1e-8, maxiter=1e5):
    """
    Determine the steady-state distribution with the power method and
    store the result in ``self.pi``.

    Starting from a unit vector, the transposed probability matrix is
    applied repeatedly (twice per iteration) until the 1-norm of the
    difference between consecutive iterates drops below ``tol`` or
    ``maxiter`` iterations have been performed.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result.
    maxiter : int, optional(default=1e5)
        The maximum number of power iterations to be carried out.

    Remarks
    -------
    Robust even for large state spaces, but may converge slowly for
    chains whose states are weakly connected.
    """
    Pt = self.getIrreducibleTransitionMatrix().T  # transpose once to speed up the dot products
    dim = Pt.shape[0]
    current = np.zeros(dim)
    previous = np.zeros(dim)
    current[0] = 1
    diff = norm(current - previous, 1)
    iteration = 0
    while diff > tol and iteration < maxiter:
        previous = Pt.dot(current)
        current = Pt.dot(previous)
        diff = norm(current - previous, 1)
        iteration += 1
    self.pi = current
def eigenMethod(self, tol=1e-8, maxiter=1e5):
    """
    Determine ``self.pi`` as the eigenvector of the rate matrix
    associated with the eigenvalue closest to zero, found by
    :func:`eigs` with shift-invert around ``sigma=1e-6``.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result.
    maxiter : int, optional(default=1e5)
        The maximum number of iterations to be carried out.

    Remarks
    -------
    Chains with one or two states are handled analytically, since the
    Arnoldi iteration requires at least three states. For large state
    spaces this method may not work well.
    """
    Q = self.getIrreducibleTransitionMatrix(probabilities=False)
    if Q.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    if Q.shape == (2, 2):
        # Two-state chain: pi is proportional to the opposite transition rates.
        self.pi = np.array([Q[1, 0], Q[0, 1]] / (Q[0, 1] + Q[1, 0]))
        return
    dim = Q.shape[0]
    initial = np.ones(dim, dtype=float)
    _, vectors = eigs(Q.T, k=1, v0=initial, sigma=1e-6, which='LM', tol=tol, maxiter=maxiter)
    stationary = vectors[:, 0].real
    stationary /= stationary.sum()
    self.pi = stationary
def linearMethod(self):
    """
    Determine ``self.pi`` exactly by solving the linear system
    pi*(P-I) = 0 together with the normalization sum(pi) = 1, using the
    sparse direct solver :func:`spsolve`.

    Remarks
    -------
    For large state spaces the direct solver may run out of memory.
    Code due to http://stackoverflow.com/questions/21308848/
    """
    P = self.getIrreducibleTransitionMatrix()
    if P.shape == (1, 1):
        # A single state trivially has probability one.
        self.pi = np.array([1.0])
        return
    dim = P.shape[0]
    # Transposed system with the first equation replaced by the normalizing condition.
    A = vstack([np.ones(dim), (P - eye(dim)).T[1:, :]]).tocsr()
    b = np.zeros(dim)
    b[0] = 1
    self.pi = spsolve(A, b)
def krylovMethod(self, tol=1e-8):
    """
    Determine ``self.pi`` with the :func:`gmres` Krylov-subspace solver
    applied to the system pi*(P-I) = 0 with normalization sum(pi) = 1.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result.

    Raises
    ------
    RuntimeError
        If gmres does not converge.

    Remarks
    -------
    For large state spaces, this method may not always give a solution.
    Code due to http://stackoverflow.com/questions/21308848/
    """
    P = self.getIrreducibleTransitionMatrix()
    # If P consists of one element, then set self.pi = 1.0.
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    size = P.shape[0]
    dP = P - eye(size)
    # Replace the first equation by the normalizing condition.
    A = vstack([np.ones(size), dP.T[1:, :]]).tocsr()
    rhs = np.zeros((size,))
    rhs[0] = 1
    try:
        # Compatibility fix: SciPy >= 1.12 renamed gmres's `tol` to `rtol` (tol removed in 1.14).
        pi, info = gmres(A, rhs, rtol=tol)
    except TypeError:
        # Older SciPy without the `rtol` keyword.
        pi, info = gmres(A, rhs, tol=tol)
    if info != 0:
        raise RuntimeError("gmres did not converge")
    self.pi = pi
def computePi(self, method='power'):
    """
    Compute the steady-state distribution with the chosen method and
    store it in the attribute ``pi``.

    Parameters
    ----------
    method : string, optional(default='power')
        One of 'power', 'eigen', 'linear' or 'krylov'; 'power' is the
        most robust choice.

    See Also
    --------
    :func:`powerMethod`, :func:`eigenMethod`, :func:`linearMethod`,
    :func:`krylovMethod`
    """
    methodSet = ['power', 'eigen', 'linear', 'krylov']
    assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
    solver = getattr(self, method + 'Method')
    return solver()
def printPi(self):
    """
    Print each state together with its steady-state probability.

    Requires ``self.pi`` (via :func:`computePi`) and a non-empty
    ``self.mapping`` from the direct/indirect method. Not recommended
    for large state spaces.
    """
    assert self.pi is not None, "Calculate pi before calling printPi()"
    assert len(self.mapping) > 0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
    for index, state in self.mapping.items():
        print(state, self.pi[index])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.eigenMethod | python | def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
Q = self.getIrreducibleTransitionMatrix(probabilities=False)
if Q.shape == (1, 1):
self.pi = np.array([1.0])
return
if Q.shape == (2, 2):
self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
return
size = Q.shape[0]
guess = np.ones(size,dtype=float)
w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
pi = v[:, 0].real
pi /= pi.sum()
self.pi = pi | Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
The result is stored in the class attribute ``pi``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.eigenMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
Here we let the initial ``pi`` be a vector of ones.
For large state spaces, this method may not work well.
At the moment, we call :func:`powerMethod` if the number of states is 2.
Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L443-L487 | [
"def getIrreducibleTransitionMatrix(self,probabilities=True):\n #Gets the transitionmatrix and assert that it consists of a single irreducible class.\n P = self.getTransitionMatrix(probabilities=True)\n self.assertSingleClass(P) \n return P\n"
] | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
self.P = P
self.direct = direct
self.pi = None #steady state probability vector
self.mapping = {} #mapping used to identify states
self.initialState = None #a dummy initial state
@property
def size(self):
"""
Return the number of states in the state space, if ``self.mapping`` is defined.
"""
return len(self.mapping)
def statespace(self):
"""
To be provided by the subclass. Return the state space
in an integer 2d numpy array with a state on each row.
"""
raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
"""
To be provided by the subclass.
Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
Ensure that unique states are returned.
"""
raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
def checkTransitionType(self,state):
"""
Check whether the transition function returns output of the correct types.
This can be either a dictionary with as keys ints/tuples and values floats.
Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates.
"""
test = self.transition(state)
assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
if isinstance(test,dict):
assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
usesNumpy=False
if isinstance(test,tuple):
assert len(test)==2, "The transition function should return two variables: states and rates."
states,rates = test
assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
usesNumpy = True
return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
def indirectInitialMatrix(self, initialState):
"""
Given some initial state, this iteratively determines new states.
We repeatedly call the transition function on unvisited states in the frontier set.
Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.
"""
mapping = {}
rates = OrderedDict()
#Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
convertedState = self.checkInitialState(initialState)
if isinstance(convertedState,set):
#If initialstates is a set, include all states in the set in the mapping.
frontier = set( convertedState )
for idx,state in enumerate(convertedState):
mapping[state] = idx
if idx == 0: #Test the return type of the transition function (dict or numpy).
usesNumpy = self.checkTransitionType(initialState)
else:
#Otherwise include only the single state.
frontier = set( [convertedState] )
usesNumpy = self.checkTransitionType(initialState)
mapping[convertedState] = 0
while len(frontier) > 0:
fromstate = frontier.pop()
fromindex = mapping[fromstate]
if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
transitions = self.transition(np.array(fromstate))
transitions = self.convertToTransitionDict(transitions)
else:
transitions = self.transition(fromstate)
for tostate,rate in transitions.items():
if tostate not in mapping:
frontier.add(tostate)
mapping[tostate] = len(mapping)
toindex = mapping[tostate]
rates[(fromindex, toindex)] = rate
#Inverse the keys and values in mapping to get a dictionary with indices and states.
self.mapping = {value: key for key, value in list(mapping.items())}
#Use the `rates` dictionary to fill a sparse dok matrix.
D = dok_matrix((self.size,self.size))
D.update(rates)
return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
def directInitialMatrix(self):
"""
We generate an initial sparse matrix with all the transition rates (or probabilities).
We later transform this matrix into a rate or probability matrix depending on the preferred method of obtaining pi.
"""
#First initialize state codes and the mapping with states.
self.setStateCodes()
#For each state, calculate the indices of reached states and rates using the transition function.
results = imap(self.transitionStates, self.mapping.values())
#Simpler alternative that uses less memory.
#Would be competitive if the conversion from dok to csr is faster.
# D = dok_matrix((self.size,self.size),dtype=float)
# for index,(col,rate) in enumerate(results):
# D.update({(index,c): r for c,r in zip(col,rate)})
# return D.tocsr()
#preallocate memory for the rows, cols and rates of the sparse matrix
rows = np.empty(self.size,dtype=int)
cols = np.empty(self.size,dtype=int)
rates = np.empty(self.size,dtype=float)
#now fill the arrays with the results, increasing their size if current memory is too small.
right = 0
for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
left = right
right += len(col)
if right >= len(cols):
new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
cols.resize(new_capacity)
rates.resize(new_capacity)
rows.resize(new_capacity)
rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
cols[left:right] = col
rates[left:right] = rate
#Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag
def convertToProbabilityMatrix(self, Q):
"""
Converts the initial matrix to a probability matrix
We calculate P = I + Q/l, with l the largest diagonal element.
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
"""
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l)
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
def getTransitionMatrix(self, probabilities=True):
    """Return the transition matrix of the chain as a sparse csr matrix.

    A user-supplied ``self.P`` is reused (converted to csr format when
    needed); otherwise ``P`` is generated once with the direct or the
    indirect method and cached.  The cached matrix is then converted to
    a probability matrix (the default, used by most solution methods)
    or, with ``probabilities=False``, to a rate matrix.
    """
    if self.P is None:
        # No matrix supplied: derive it from the transition function.
        if self.direct == True:
            self.P = self.directInitialMatrix()
        else:
            self.P = self.indirectInitialMatrix(self.initialState)
    elif isspmatrix(self.P):
        if not isspmatrix_csr(self.P):
            self.P = self.P.tocsr()
    else:
        assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
        self.P = csr_matrix(self.P)
    converter = self.convertToProbabilityMatrix if probabilities else self.convertToRateMatrix
    return converter(self.P)
def getIrreducibleTransitionMatrix(self, probabilities=True):
    """Return the transition matrix and assert that it is irreducible.

    Parameters
    ----------
    probabilities : bool, optional(default=True)
        When True a probability matrix is returned, otherwise a rate
        matrix, mirroring :func:`getTransitionMatrix`.

    Remarks
    -------
    Bug fix: the previous code hard-coded ``probabilities=True`` and
    ignored the argument, so callers asking for a rate matrix (notably
    :func:`eigenMethod`, whose shift-invert around sigma~0 assumes the
    rate matrix's zero eigenvalue) silently received a probability
    matrix.  The flag is now forwarded.
    """
    P = self.getTransitionMatrix(probabilities=probabilities)
    self.assertSingleClass(P)
    return P
def powerMethod(self, tol=1e-8, maxiter=1e5):
    """Determine ``pi`` by repeated multiplication with the transition matrix.

    The result is stored in the class attribute ``pi``.  Iteration stops
    once the L1-norm of the difference between two successive iterates
    drops below ``tol``, or after ``maxiter`` rounds.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
    maxiter : int, optional(default=1e5)
        The maximum number of power iterations to be carried out.

    Remarks
    -------
    The power method is robust even when the state space becomes large,
    whereas the other methods may have memory or convergence issues.
    It may converge slowly for chains whose states are rather
    disconnected, i.e. when expected passage times between states are large.
    """
    # Transpose once so every iteration is a single matrix-vector product.
    PT = self.getIrreducibleTransitionMatrix().T
    states = PT.shape[0]
    current = np.zeros(states)
    current[0] = 1
    previous = np.zeros(states)
    diff = norm(current - previous, 1)
    iteration = 0
    while diff > tol and iteration < maxiter:
        # Two multiplications per round, matching the original scheme.
        previous = PT.dot(current)
        current = PT.dot(previous)
        diff = norm(current - previous, 1)
        iteration += 1
    self.pi = current
def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
    """
    Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
    The result is stored in the class attribute ``pi``.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
    maxiter : int, optional(default=1e5)
        The maximum number of iterations to be carried out.

    Example
    -------
    >>> P = np.array([[0.5,0.5],[0.6,0.4]])
    >>> mc = markovChain(P)
    >>> mc.eigenMethod()
    >>> print(mc.pi)
    [ 0.54545455 0.45454545]

    Remarks
    -------
    The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
    Here we let the initial ``pi`` be a vector of ones.
    For large state spaces, this method may not work well.
    Chains with 1 or 2 states are handled analytically instead of via :func:`eigs`.
    Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
    """
    # Request the rate matrix: pi is the left null vector of Q (eigenvalue 0).
    # NOTE(review): this relies on getIrreducibleTransitionMatrix honouring
    # probabilities=False -- verify, since the sigma~0 shift below assumes it.
    Q = self.getIrreducibleTransitionMatrix(probabilities=False)
    if Q.shape == (1, 1):
        # A single state trivially has probability one; eigs cannot handle 1x1.
        self.pi = np.array([1.0])
        return
    if Q.shape == (2, 2):
        # Two-state chain: balance equations give pi = (q10, q01)/(q01+q10).
        self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
        return
    size = Q.shape[0]
    guess = np.ones(size,dtype=float)  # initial guess; strongly affects convergence speed.
    # Shift-invert around sigma ~ 0 targets the eigenvalue of Q.T closest to
    # zero; its eigenvector is the (unnormalized) stationary distribution.
    w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
    pi = v[:, 0].real  # eigs works in complex arithmetic; the vector is real up to rounding.
    pi /= pi.sum()  # normalize into a probability distribution.
    self.pi = pi
def linearMethod(self):
    """Determine ``pi`` exactly by solving a sparse linear system with :func:`spsolve`.

    The stationary equations ``pi (P - I) = 0`` are transposed, the
    first equation is replaced by the normalization ``sum(pi) = 1``,
    and the resulting system is solved directly.  The result is stored
    in the class attribute ``pi``.  The method takes no parameters
    since it is exact.

    Remarks
    -------
    For large state spaces the direct solver may fail due to memory
    overflow.
    Code due to http://stackoverflow.com/questions/21308848/
    """
    P = self.getIrreducibleTransitionMatrix()
    if P.shape == (1, 1):
        # A single state trivially has probability one.
        self.pi = np.array([1.0])
        return
    states = P.shape[0]
    balance = (P - eye(states)).T
    # Swap the first balance equation for the normalizing condition.
    system = vstack([np.ones(states), balance[1:, :]]).tocsr()
    target = np.zeros(states)
    target[0] = 1
    self.pi = spsolve(system, target)
def krylovMethod(self, tol=1e-8):
    """Determine ``pi`` with the iterative :func:`gmres` solver.

    The same linear system as in :func:`linearMethod` is solved, but by
    searching Krylov subspace for a vector with minimal residual.  The
    result is stored in the class attribute ``pi``.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.

    Raises
    ------
    RuntimeError
        If gmres does not converge.

    Remarks
    -------
    For large state spaces, this method may not always give a solution.
    Code due to http://stackoverflow.com/questions/21308848/
    """
    P = self.getIrreducibleTransitionMatrix()
    if P.shape == (1, 1):
        # A single state trivially has probability one.
        self.pi = np.array([1.0])
        return
    states = P.shape[0]
    balance = (P - eye(states)).T
    # Swap the first balance equation for the normalizing condition.
    system = vstack([np.ones(states), balance[1:, :]]).tocsr()
    target = np.zeros(states)
    target[0] = 1
    solution, info = gmres(system, target, tol=tol)
    if info != 0:
        raise RuntimeError("gmres did not converge")
    self.pi = solution
def computePi(self, method='power'):
    """Compute the steady-state distribution and store it in the attribute ``pi``.

    Parameters
    ----------
    method : string, optional(default='power')
        One of 'power' (the default and most robust), 'eigen', 'linear'
        or 'krylov'; dispatches to the corresponding solution routine.

    See Also
    --------
    For details about the specific methods see
    :func:`powerMethod`,
    :func:`eigenMethod`,
    :func:`linearMethod`, and
    :func:`krylovMethod` .
    """
    methodSet = ['power','eigen','linear','krylov']
    assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
    # Dispatch by name: e.g. 'power' -> self.powerMethod().
    solver = getattr(self, method + 'Method')
    return solver()
def printPi(self):
    """Print every state together with its steady-state probability.

    Only usable after ``pi`` has been computed with the direct or
    indirect method (so that ``mapping`` is filled).  Not recommended
    for large state spaces.
    """
    assert self.pi is not None, "Calculate pi before calling printPi()"
    assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
    for index, state in self.mapping.items():
        print(state, self.pi[index])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.linearMethod | python | def linearMethod(self):
"""
Determines ``pi`` by solving a system of linear equations using :func:`spsolve`.
The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.linearMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
For large state spaces, the linear algebra solver may not work well due to memory overflow.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
self.pi = spsolve(A, rhs) | Determines ``pi`` by solving a system of linear equations using :func:`spsolve`.
The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.linearMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
For large state spaces, the linear algebra solver may not work well due to memory overflow.
Code due to http://stackoverflow.com/questions/21308848/ | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L489-L521 | [
"def getIrreducibleTransitionMatrix(self,probabilities=True):\n #Gets the transitionmatrix and assert that it consists of a single irreducible class.\n P = self.getTransitionMatrix(probabilities=True)\n self.assertSingleClass(P) \n return P\n"
] | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
self.P = P
self.direct = direct
self.pi = None #steady state probability vector
self.mapping = {} #mapping used to identify states
self.initialState = None #a dummy initial state
@property
def size(self):
"""
Return the number of states in the state space, if ``self.mapping`` is defined.
"""
return len(self.mapping)
def statespace(self):
"""
To be provided by the subclass. Return the state space
in an integer 2d numpy array with a state on each row.
"""
raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
"""
To be provided by the subclass.
Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
Ensure that unique states are returned.
"""
raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
def checkTransitionType(self,state):
"""
Check whether the transition function returns output of the correct types.
This can be either a dictionary with as keys ints/tuples and values floats.
Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates.
"""
test = self.transition(state)
assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
if isinstance(test,dict):
assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
usesNumpy=False
if isinstance(test,tuple):
assert len(test)==2, "The transition function should return two variables: states and rates."
states,rates = test
assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
usesNumpy = True
return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
def indirectInitialMatrix(self, initialState):
"""
Given some initial state, this iteratively determines new states.
We repeatedly call the transition function on unvisited states in the frontier set.
Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.
"""
mapping = {}
rates = OrderedDict()
#Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
convertedState = self.checkInitialState(initialState)
if isinstance(convertedState,set):
#If initialstates is a set, include all states in the set in the mapping.
frontier = set( convertedState )
for idx,state in enumerate(convertedState):
mapping[state] = idx
if idx == 0: #Test the return type of the transition function (dict or numpy).
usesNumpy = self.checkTransitionType(initialState)
else:
#Otherwise include only the single state.
frontier = set( [convertedState] )
usesNumpy = self.checkTransitionType(initialState)
mapping[convertedState] = 0
while len(frontier) > 0:
fromstate = frontier.pop()
fromindex = mapping[fromstate]
if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
transitions = self.transition(np.array(fromstate))
transitions = self.convertToTransitionDict(transitions)
else:
transitions = self.transition(fromstate)
for tostate,rate in transitions.items():
if tostate not in mapping:
frontier.add(tostate)
mapping[tostate] = len(mapping)
toindex = mapping[tostate]
rates[(fromindex, toindex)] = rate
#Inverse the keys and values in mapping to get a dictionary with indices and states.
self.mapping = {value: key for key, value in list(mapping.items())}
#Use the `rates` dictionary to fill a sparse dok matrix.
D = dok_matrix((self.size,self.size))
D.update(rates)
return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
def directInitialMatrix(self):
"""
We generate an initial sparse matrix with all the transition rates (or probabilities).
We later transform this matrix into a rate or probability matrix depending on the preferred method of obtaining pi.
"""
#First initialize state codes and the mapping with states.
self.setStateCodes()
#For each state, calculate the indices of reached states and rates using the transition function.
results = imap(self.transitionStates, self.mapping.values())
#Simpler alternative that uses less memory.
#Would be competitive if the conversion from dok to csr is faster.
# D = dok_matrix((self.size,self.size),dtype=float)
# for index,(col,rate) in enumerate(results):
# D.update({(index,c): r for c,r in zip(col,rate)})
# return D.tocsr()
#preallocate memory for the rows, cols and rates of the sparse matrix
rows = np.empty(self.size,dtype=int)
cols = np.empty(self.size,dtype=int)
rates = np.empty(self.size,dtype=float)
#now fill the arrays with the results, increasing their size if current memory is too small.
right = 0
for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
left = right
right += len(col)
if right >= len(cols):
new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
cols.resize(new_capacity)
rates.resize(new_capacity)
rows.resize(new_capacity)
rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
cols[left:right] = col
rates[left:right] = rate
#Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag
def convertToProbabilityMatrix(self, Q):
"""
Converts the initial matrix to a probability matrix
We calculate P = I + Q/l, with l the largest diagonal element.
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
"""
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l)
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
def getTransitionMatrix(self,probabilities=True):
"""
If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix.
"""
if self.P is not None:
if isspmatrix(self.P):
if not isspmatrix_csr(self.P):
self.P = self.P.tocsr()
else:
assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
self.P = csr_matrix(self.P)
elif self.direct == True:
self.P = self.directInitialMatrix()
else:
self.P = self.indirectInitialMatrix(self.initialState)
if probabilities:
P = self.convertToProbabilityMatrix(self.P)
else:
P = self.convertToRateMatrix(self.P)
return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
#Gets the transitionmatrix and assert that it consists of a single irreducible class.
P = self.getTransitionMatrix(probabilities=True)
self.assertSingleClass(P)
return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
"""
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi
def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
The result is stored in the class attribute ``pi``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.eigenMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
Here we let the initial ``pi`` be a vector of ones.
For large state spaces, this method may not work well.
At the moment, we call :func:`powerMethod` if the number of states is 2.
Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
"""
Q = self.getIrreducibleTransitionMatrix(probabilities=False)
if Q.shape == (1, 1):
self.pi = np.array([1.0])
return
if Q.shape == (2, 2):
self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
return
size = Q.shape[0]
guess = np.ones(size,dtype=float)
w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
pi = v[:, 0].real
pi /= pi.sum()
self.pi = pi
def linearMethod(self):
"""
Determines ``pi`` by solving a system of linear equations using :func:`spsolve`.
The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.linearMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
For large state spaces, the linear algebra solver may not work well due to memory overflow.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
self.pi = spsolve(A, rhs)
def krylovMethod(self,tol=1e-8):
"""
We obtain ``pi`` by using the :func:``gmres`` solver for the system of linear equations.
It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.krylovMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
Remarks
-------
For large state spaces, this method may not always give a solution.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
pi, info = gmres(A, rhs, tol=tol)
if info != 0:
raise RuntimeError("gmres did not converge")
self.pi = pi
def computePi(self,method='power'):
"""
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
"""
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)()
def printPi(self):
"""
Prints all states state and their steady state probabilities.
Not recommended for large state spaces.
"""
assert self.pi is not None, "Calculate pi before calling printPi()"
assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
for key,state in self.mapping.items():
print(state,self.pi[key])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.krylovMethod | python | def krylovMethod(self,tol=1e-8):
"""
We obtain ``pi`` by using the :func:``gmres`` solver for the system of linear equations.
It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.krylovMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
Remarks
-------
For large state spaces, this method may not always give a solution.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
pi, info = gmres(A, rhs, tol=tol)
if info != 0:
raise RuntimeError("gmres did not converge")
self.pi = pi | We obtain ``pi`` by using the :func:``gmres`` solver for the system of linear equations.
It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.krylovMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
Remarks
-------
For large state spaces, this method may not always give a solution.
Code due to http://stackoverflow.com/questions/21308848/ | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L523-L563 | [
"def getIrreducibleTransitionMatrix(self,probabilities=True):\n #Gets the transitionmatrix and assert that it consists of a single irreducible class.\n P = self.getTransitionMatrix(probabilities=True)\n self.assertSingleClass(P) \n return P\n"
] | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
self.P = P
self.direct = direct
self.pi = None #steady state probability vector
self.mapping = {} #mapping used to identify states
self.initialState = None #a dummy initial state
@property
def size(self):
"""
Return the number of states in the state space, if ``self.mapping`` is defined.
"""
return len(self.mapping)
def statespace(self):
"""
To be provided by the subclass. Return the state space
in an integer 2d numpy array with a state on each row.
"""
raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
"""
To be provided by the subclass.
Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
Ensure that unique states are returned.
"""
raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
def checkTransitionType(self,state):
"""
Check whether the transition function returns output of the correct types.
This can be either a dictionary with as keys ints/tuples and values floats.
Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates.
"""
test = self.transition(state)
assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
if isinstance(test,dict):
assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
usesNumpy=False
if isinstance(test,tuple):
assert len(test)==2, "The transition function should return two variables: states and rates."
states,rates = test
assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
usesNumpy = True
return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
def indirectInitialMatrix(self, initialState):
"""
Given some initial state, this iteratively determines new states.
We repeatedly call the transition function on unvisited states in the frontier set.
Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.
"""
mapping = {}
rates = OrderedDict()
#Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
convertedState = self.checkInitialState(initialState)
if isinstance(convertedState,set):
#If initialstates is a set, include all states in the set in the mapping.
frontier = set( convertedState )
for idx,state in enumerate(convertedState):
mapping[state] = idx
if idx == 0: #Test the return type of the transition function (dict or numpy).
usesNumpy = self.checkTransitionType(initialState)
else:
#Otherwise include only the single state.
frontier = set( [convertedState] )
usesNumpy = self.checkTransitionType(initialState)
mapping[convertedState] = 0
while len(frontier) > 0:
fromstate = frontier.pop()
fromindex = mapping[fromstate]
if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
transitions = self.transition(np.array(fromstate))
transitions = self.convertToTransitionDict(transitions)
else:
transitions = self.transition(fromstate)
for tostate,rate in transitions.items():
if tostate not in mapping:
frontier.add(tostate)
mapping[tostate] = len(mapping)
toindex = mapping[tostate]
rates[(fromindex, toindex)] = rate
#Inverse the keys and values in mapping to get a dictionary with indices and states.
self.mapping = {value: key for key, value in list(mapping.items())}
#Use the `rates` dictionary to fill a sparse dok matrix.
D = dok_matrix((self.size,self.size))
D.update(rates)
return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
def directInitialMatrix(self):
"""
We generate an initial sparse matrix with all the transition rates (or probabilities).
We later transform this matrix into a rate or probability matrix depending on the preferred method of obtaining pi.
"""
#First initialize state codes and the mapping with states.
self.setStateCodes()
#For each state, calculate the indices of reached states and rates using the transition function.
results = imap(self.transitionStates, self.mapping.values())
#Simpler alternative that uses less memory.
#Would be competitive if the conversion from dok to csr is faster.
# D = dok_matrix((self.size,self.size),dtype=float)
# for index,(col,rate) in enumerate(results):
# D.update({(index,c): r for c,r in zip(col,rate)})
# return D.tocsr()
#preallocate memory for the rows, cols and rates of the sparse matrix
rows = np.empty(self.size,dtype=int)
cols = np.empty(self.size,dtype=int)
rates = np.empty(self.size,dtype=float)
#now fill the arrays with the results, increasing their size if current memory is too small.
right = 0
for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
left = right
right += len(col)
if right >= len(cols):
new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
cols.resize(new_capacity)
rates.resize(new_capacity)
rows.resize(new_capacity)
rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
cols[left:right] = col
rates[left:right] = rate
#Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag
def convertToProbabilityMatrix(self, Q):
"""
Converts the initial matrix to a probability matrix
We calculate P = I + Q/l, with l the largest diagonal element.
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
"""
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l)
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
def getTransitionMatrix(self,probabilities=True):
"""
If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix.
"""
if self.P is not None:
if isspmatrix(self.P):
if not isspmatrix_csr(self.P):
self.P = self.P.tocsr()
else:
assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
self.P = csr_matrix(self.P)
elif self.direct == True:
self.P = self.directInitialMatrix()
else:
self.P = self.indirectInitialMatrix(self.initialState)
if probabilities:
P = self.convertToProbabilityMatrix(self.P)
else:
P = self.convertToRateMatrix(self.P)
return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
#Gets the transitionmatrix and assert that it consists of a single irreducible class.
P = self.getTransitionMatrix(probabilities=True)
self.assertSingleClass(P)
return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
"""
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi
def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Determines ``pi`` by searching for the eigenvector corresponding to the first eigenvalue, using the :func:`eigs` function.
The result is stored in the class attribute ``pi``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.eigenMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The speed of convergence depends heavily on the choice of the initial guess for ``pi``.
Here we let the initial ``pi`` be a vector of ones.
For large state spaces, this method may not work well.
At the moment, we call :func:`powerMethod` if the number of states is 2.
Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
"""
Q = self.getIrreducibleTransitionMatrix(probabilities=False)
if Q.shape == (1, 1):
self.pi = np.array([1.0])
return
if Q.shape == (2, 2):
self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
return
size = Q.shape[0]
guess = np.ones(size,dtype=float)
w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
pi = v[:, 0].real
pi /= pi.sum()
self.pi = pi
def linearMethod(self):
"""
Determines ``pi`` by solving a system of linear equations using :func:`spsolve`.
The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.linearMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
For large state spaces, the linear algebra solver may not work well due to memory overflow.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
self.pi = spsolve(A, rhs)
def krylovMethod(self,tol=1e-8):
"""
We obtain ``pi`` by using the :func:``gmres`` solver for the system of linear equations.
It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.krylovMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
Remarks
-------
For large state spaces, this method may not always give a solution.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
pi, info = gmres(A, rhs, tol=tol)
if info != 0:
raise RuntimeError("gmres did not converge")
self.pi = pi
def computePi(self,method='power'):
"""
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
"""
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)()
def printPi(self):
"""
Prints all states state and their steady state probabilities.
Not recommended for large state spaces.
"""
assert self.pi is not None, "Calculate pi before calling printPi()"
assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
for key,state in self.mapping.items():
print(state,self.pi[key])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.computePi | python | def computePi(self,method='power'):
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)() | Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` . | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L565-L594 | null | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
self.P = P
self.direct = direct
self.pi = None #steady state probability vector
self.mapping = {} #mapping used to identify states
self.initialState = None #a dummy initial state
@property
def size(self):
"""
Return the number of states in the state space, if ``self.mapping`` is defined.
"""
return len(self.mapping)
def statespace(self):
"""
To be provided by the subclass. Return the state space
in an integer 2d numpy array with a state on each row.
"""
raise NotImplementedError('Implement the function statespace() in the subclass')
def transition(self, state):
"""
To be provided by the subclass.
Return a 2d numpy array with reachable states and a 1d numpy array with transition rates.
For the iterative method, it is also allowed to return a dictionary where the keys are tuples with the state and the values are the transition rates.
Ensure that unique states are returned.
"""
raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
def checkTransitionType(self,state):
"""
Check whether the transition function returns output of the correct types.
This can be either a dictionary with as keys ints/tuples and values floats.
Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates.
"""
test = self.transition(state)
assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
if isinstance(test,dict):
assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
usesNumpy=False
if isinstance(test,tuple):
assert len(test)==2, "The transition function should return two variables: states and rates."
states,rates = test
assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
usesNumpy = True
return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
    def indirectInitialMatrix(self, initialState):
        """
        Build the sparse (csr) transition matrix by exploring the state space
        from `initialState`.

        Repeatedly calls :func:`transition` on unvisited states in the
        frontier set. Each newly seen state gets the next free index in
        `mapping`; rates are collected per (from, to) index pair. As a side
        effect fills ``self.mapping`` with index -> state.
        """
        mapping = {}
        rates = OrderedDict()
        #Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
        convertedState = self.checkInitialState(initialState)
        if isinstance(convertedState,set):
            #If initialstates is a set, include all states in the set in the mapping.
            frontier = set( convertedState )
            for idx,state in enumerate(convertedState):
                mapping[state] = idx
                if idx == 0: #Test the return type of the transition function (dict or numpy).
                    #NOTE(review): this passes the whole *set* to transition() via
                    #checkTransitionType rather than a single state -- confirm intended.
                    usesNumpy = self.checkTransitionType(initialState)
        else:
            #Otherwise include only the single state.
            frontier = set( [convertedState] )
            usesNumpy = self.checkTransitionType(initialState)
            mapping[convertedState] = 0
        #Breadth-style exploration: pop an unexplored state, record its outgoing rates.
        while len(frontier) > 0:
            fromstate = frontier.pop()
            fromindex = mapping[fromstate]
            if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
                transitions = self.transition(np.array(fromstate))
                transitions = self.convertToTransitionDict(transitions)
            else:
                transitions = self.transition(fromstate)
            for tostate,rate in transitions.items():
                if tostate not in mapping:
                    frontier.add(tostate)
                    mapping[tostate] = len(mapping)
                toindex = mapping[tostate]
                rates[(fromindex, toindex)] = rate
        #Inverse the keys and values in mapping to get a dictionary with indices and states.
        self.mapping = {value: key for key, value in list(mapping.items())}
        #Use the `rates` dictionary to fill a sparse dok matrix.
        D = dok_matrix((self.size,self.size))
        D.update(rates)
        return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
    def directInitialMatrix(self):
        """
        Build the full sparse transition matrix by evaluating the transition
        function on every state of the state space.

        First initializes state codes and the index->state mapping, then
        lazily evaluates :func:`transitionStates` per state and assembles the
        results into a coo matrix, converted to csr for fast arithmetic.

        NOTE(review): `imap` appears to be a module-level py2/py3 lazy-map
        compatibility alias -- TODO confirm it is defined at the file top.
        """
        #First initialize state codes and the mapping with states.
        self.setStateCodes()
        #For each state, calculate the indices of reached states and rates using the transition function.
        results = imap(self.transitionStates, self.mapping.values())
        #Simpler alternative that uses less memory.
        #Would be competitive if the conversion from dok to csr is faster.
        # D = dok_matrix((self.size,self.size),dtype=float)
        # for index,(col,rate) in enumerate(results):
        #     D.update({(index,c): r for c,r in zip(col,rate)})
        # return D.tocsr()
        #preallocate memory for the rows, cols and rates of the sparse matrix
        rows = np.empty(self.size,dtype=int)
        cols = np.empty(self.size,dtype=int)
        rates = np.empty(self.size,dtype=float)
        #now fill the arrays with the results, increasing their size if current memory is too small.
        right = 0
        for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
            left = right
            right += len(col)
            if right >= len(cols):
                new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
                cols.resize(new_capacity)
                rates.resize(new_capacity)
                rows.resize(new_capacity)
            rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
            cols[left:right] = col
            rates[left:right] = rate
        #Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
        return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag
def convertToProbabilityMatrix(self, Q):
"""
Converts the initial matrix to a probability matrix
We calculate P = I + Q/l, with l the largest diagonal element.
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
"""
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l)
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
def getTransitionMatrix(self,probabilities=True):
"""
If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix.
"""
if self.P is not None:
if isspmatrix(self.P):
if not isspmatrix_csr(self.P):
self.P = self.P.tocsr()
else:
assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
self.P = csr_matrix(self.P)
elif self.direct == True:
self.P = self.directInitialMatrix()
else:
self.P = self.indirectInitialMatrix(self.initialState)
if probabilities:
P = self.convertToProbabilityMatrix(self.P)
else:
P = self.convertToRateMatrix(self.P)
return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
#Gets the transitionmatrix and assert that it consists of a single irreducible class.
P = self.getTransitionMatrix(probabilities=True)
self.assertSingleClass(P)
return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
"""
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi
    def eigenMethod(self, tol = 1e-8, maxiter = 1e5):
        """
        Determine ``pi`` as the eigenvector of the rate matrix belonging to
        the eigenvalue closest to zero, using :func:`scipy.sparse.linalg.eigs`.
        The result is stored in the class attribute ``pi``.

        Parameters
        ----------
        tol : float, optional(default=1e-8)
            Tolerance level for the precision of the end result.
        maxiter : int, optional(default=1e5)
            The maximum number of iterations to be carried out.

        Remarks
        -------
        Convergence depends heavily on the initial guess; a vector of ones is
        used here. For large state spaces this method may not work well.
        Code is due to a colleague: http://nicky.vanforeest.com/probability/markovChains/markovChain.html
        """
        #NOTE(review): a *rate* matrix is requested; this relies on
        #getIrreducibleTransitionMatrix() actually honoring probabilities=False.
        Q = self.getIrreducibleTransitionMatrix(probabilities=False)
        #A single-state chain trivially has pi = [1].
        if Q.shape == (1, 1):
            self.pi = np.array([1.0])
            return
        #For 2x2 the stationary distribution is proportional to the opposite off-diagonal rates.
        if Q.shape == (2, 2):
            self.pi= np.array([Q[1,0],Q[0,1]]/(Q[0,1]+Q[1,0]))
            return
        size = Q.shape[0]
        guess = np.ones(size,dtype=float)
        #Shift-invert around sigma=1e-6 targets the eigenvalue closest to zero.
        w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',tol=tol, maxiter=maxiter)
        pi = v[:, 0].real
        pi /= pi.sum()
        self.pi = pi
def linearMethod(self):
"""
Determines ``pi`` by solving a system of linear equations using :func:`spsolve`.
The method has no parameters since it is an exact method. The result is stored in the class attribute ``pi``.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.linearMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
For large state spaces, the linear algebra solver may not work well due to memory overflow.
Code due to http://stackoverflow.com/questions/21308848/
"""
P = self.getIrreducibleTransitionMatrix()
#if P consists of one element, then set self.pi = 1.0
if P.shape == (1, 1):
self.pi = np.array([1.0])
return
size = P.shape[0]
dP = P - eye(size)
#Replace the first equation by the normalizing condition.
A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
rhs = np.zeros((size,))
rhs[0] = 1
self.pi = spsolve(A, rhs)
    def krylovMethod(self,tol=1e-8):
        """
        Obtain ``pi`` with the :func:`gmres` solver, which searches the
        Krylov subspace for a vector with minimal residual. The result is
        stored in the class attribute ``pi``.

        Parameters
        ----------
        tol : float, optional(default=1e-8)
            Tolerance level for the precision of the end result.

        Raises
        ------
        RuntimeError
            If gmres does not converge.

        Remarks
        -------
        For large state spaces, this method may not always give a solution.
        Code due to http://stackoverflow.com/questions/21308848/
        """
        P = self.getIrreducibleTransitionMatrix()
        #if P consists of one element, then set self.pi = 1.0
        if P.shape == (1, 1):
            self.pi = np.array([1.0])
            return
        size = P.shape[0]
        dP = P - eye(size)
        #Replace the first equation by the normalizing condition.
        A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()
        rhs = np.zeros((size,))
        rhs[0] = 1
        #gmres reports convergence through its info flag; treat failure as an error.
        pi, info = gmres(A, rhs, tol=tol)
        if info != 0:
            raise RuntimeError("gmres did not converge")
        self.pi = pi
def computePi(self,method='power'):
"""
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
"""
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)()
def printPi(self):
"""
Prints all states state and their steady state probabilities.
Not recommended for large state spaces.
"""
assert self.pi is not None, "Calculate pi before calling printPi()"
assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
for key,state in self.mapping.items():
print(state,self.pi[key])
|
gvanderheide/discreteMarkovChain | discreteMarkovChain/markovChain.py | markovChain.printPi | python | def printPi(self):
assert self.pi is not None, "Calculate pi before calling printPi()"
assert len(self.mapping)>0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
for key,state in self.mapping.items():
print(state,self.pi[key]) | Prints all states state and their steady state probabilities.
Not recommended for large state spaces. | train | https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L596-L604 | null | class markovChain(object):
"""
A class for calculating the steady state distribution of a Markov chain with a finite and discrete state space.
The Markov chain can be defined on continuous time or discrete time and states can be integers or vectors of integers.
Summary
-------
If the transition matrix ``P`` is specified by the user, we use that for calculating the steady-state distribution.
Otherwise, we derive ``P`` automatically using an indirect or a direct method.
Both the indirect and direct method require the function :func:`transition` to be defined within the class, calculating for each state the reachable states and corresponding rates/probabilities.
Implementing this function is similar in difficulty as constructing ``P`` manually, since when you construct ``P`` you also have to determine where you can go from any given state.
For the indirect method the user needs to specify an initial state in the class attribute ``initialState``.
By repeatedly calling the transition function on unvisited states, all reachable states are determined starting from this initial state.
For the direct method the function :func:`statespace` is required, giving the complete state space in a 2d numpy array.
We build up ``P`` by calling :func:`transition` on each state in the statespace.
Steady state distributions can be calculated by calling :func:`computePi` with a method of choice.
Parameters
----------
P : array(float,ndim=2), optional(default=None)
Optional argument. The transition matrix of the Markov chain. Needs to have an equal number of columns and rows. Can be sparse or dense.
direct : bool, optional(default=False)
Specifies whether the indirect method is used or the direct method in case ``P`` is not defined. By default, ``direct=False``.
Attributes
----------
pi : array(float)
The steady state probabilities for each state in the state space.
mapping : dict
The keys are the indices of the states in ``P`` and ``pi``, the values are the states. Useful only when using the direct/indirect method.
size : int
The size of the state space.
P : scipy.sparse.csr_matrix
The sparse transition/rate matrix.
initialState : int or array_like(int)
State from which to start the indirect method. Should be provided in the subclass by the user.
Methods
-------
transition(state)
Transition function of the Markov chain, returning the reachable states from `state` and their probabilities/rates.
Should be be provided in the subclass by the user when using the indirect/direct method.
statespace()
Returns the state space of the Markov chain. Should be be provided in the subclass by the user when using the direct method.
computePi(method='power')
Call with 'power','linear','eigen' or 'krylov' to use a certain method for obtaining ``pi``.
printPi()
Print all states and their corresponding steady state probabilities. Not recommended for large state spaces.
linearMethod()
Use :func:`spsolve`, the standard linear algebra solver for sparse matrices, to obtain ``pi``.
powerMethod(tol=1e-8,numIter=1e5)
Use repeated multiplication of the transition matrix to obtain ``pi``.
eigenMethod(tol=1e-8,numIter=1e5)
Search for the first left eigenvalue to obtain ``pi``.
krylovMethod(tol=1e-8)
Search for ``pi`` in Krylov subspace using the :func:`gmres` procedure for sparse matrices.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power') #alternative: 'linear','eigen' or 'krylov'.
>>> print(mc.pi)
[ 0.54545455 0.45454545]
"""
def __init__(self,P=None,direct=False):
self.P = P
self.direct = direct
self.pi = None #steady state probability vector
self.mapping = {} #mapping used to identify states
self.initialState = None #a dummy initial state
    @property
    def size(self):
        """
        The number of states in the state space (length of ``self.mapping``).

        Meaningful only after the mapping has been filled by the direct or
        indirect method; returns 0 otherwise.
        """
        return len(self.mapping)
    def statespace(self):
        """
        To be provided by the subclass when using the direct method.

        Must return the complete state space as a 2d integer numpy array
        with one state per row.
        """
        raise NotImplementedError('Implement the function statespace() in the subclass')
    def transition(self, state):
        """
        To be provided by the subclass: the one-step transitions from `state`.

        Must return a 2d numpy array with reachable states and a 1d numpy
        array with the corresponding transition rates. For the indirect
        method a dict with state tuples as keys and rates as values is also
        allowed. The returned states must be unique.
        """
        raise NotImplementedError('Implement the function transition() in the subclass')
def checkInitialState(self,initialState):
"""
Check whether the initial state is of the correct type.
The state should be either an int, list, tuple or np.array and all its elements must be integer.
Returns an int if the state is an integer, otherwise a tuple.
"""
assert initialState is not None, "Initial state has not been specified."
assert isinstance(initialState,(int,list,tuple,np.ndarray,set)), "initialState %r is not an int, tuple, list, set or numpy array" % initialState
if isinstance(initialState,list):
#Check whether all entries of the list are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,tuple):
#Check whether all entries are ints. Return an int if the len ==1, otherwise a tuple.
assert all(isinstance(i, int) for i in initialState), "initialState %r is not integer" % initialState
if len(initialState)==1:
initialState = int(initialState)
elif isinstance(initialState,np.ndarray):
#Check whether the state is a 1d numpy array. Return an int if it has length 1.
assert issubclass(initialState.dtype.type, np.integer) and initialState.ndim==1, "initialState %r is not a one-dimensional integer numpy array" % initialState
initialState = int(initialState) if len(initialState)==1 else tuple(initialState)
elif isinstance(initialState,set):
#If we have a set, then check whether all elements are ints or tuples.
for state in initialState:
assert isinstance(state,(tuple,int)), "the set initialState %r should contain tuples or ints" % initialState
if isinstance(state,tuple):
assert all(isinstance(i,int) for i in state), "the state %r should be integer" % initialState
return initialState
def checkTransitionType(self,state):
"""
Check whether the transition function returns output of the correct types.
This can be either a dictionary with as keys ints/tuples and values floats.
Or a tuple consisting of a 2d integer numpy array with states and a 1d numpy array with rates.
"""
test = self.transition(state)
assert isinstance(test,(dict,tuple)), "Transition function does not return a dict or tuple"
if isinstance(test,dict):
assert all(isinstance(states, (int,tuple)) for states in test.keys()), "Transition function returns a dict, but states are not represented as tuples or integers"
assert all(isinstance(rates, float) for rates in test.values()), "Transition function returns a dict, but the rates should be floats."
usesNumpy=False
if isinstance(test,tuple):
assert len(test)==2, "The transition function should return two variables: states and rates."
states,rates = test
assert isinstance(states, np.ndarray) and states.ndim==2 and issubclass(states.dtype.type, np.integer), "The states returned by the transition function need to be an integer 2d numpy array: %r" %states
assert isinstance(rates, np.ndarray) and rates.ndim==1, "The rates returned by the transition function need to be a 1d numpy array: %r" % rates
usesNumpy = True
return usesNumpy
def convertToTransitionDict(self,transitions):
"""
If numpy is used, then this converts the output from transition() into a dict.
"""
states,rates = transitions
rateDict = defaultdict(float)
if states.shape[1] == 1:
for idx,state in enumerate(states):
rateDict[int(state)] += rates[idx]
else:
for idx,state in enumerate(states):
rateDict[tuple(state)] += rates[idx]
return rateDict
    def indirectInitialMatrix(self, initialState):
        """
        Build the sparse (csr) transition matrix by exploring the state space
        from `initialState`.

        Repeatedly calls :func:`transition` on unvisited states in the
        frontier set. Each newly seen state gets the next free index in
        `mapping`; rates are collected per (from, to) index pair. As a side
        effect fills ``self.mapping`` with index -> state.
        """
        mapping = {}
        rates = OrderedDict()
        #Check whether the initial state is defined and of the correct type, and convert to a tuple or int.
        convertedState = self.checkInitialState(initialState)
        if isinstance(convertedState,set):
            #If initialstates is a set, include all states in the set in the mapping.
            frontier = set( convertedState )
            for idx,state in enumerate(convertedState):
                mapping[state] = idx
                if idx == 0: #Test the return type of the transition function (dict or numpy).
                    #NOTE(review): this passes the whole *set* to transition() via
                    #checkTransitionType rather than a single state -- confirm intended.
                    usesNumpy = self.checkTransitionType(initialState)
        else:
            #Otherwise include only the single state.
            frontier = set( [convertedState] )
            usesNumpy = self.checkTransitionType(initialState)
            mapping[convertedState] = 0
        #Breadth-style exploration: pop an unexplored state, record its outgoing rates.
        while len(frontier) > 0:
            fromstate = frontier.pop()
            fromindex = mapping[fromstate]
            if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates.
                transitions = self.transition(np.array(fromstate))
                transitions = self.convertToTransitionDict(transitions)
            else:
                transitions = self.transition(fromstate)
            for tostate,rate in transitions.items():
                if tostate not in mapping:
                    frontier.add(tostate)
                    mapping[tostate] = len(mapping)
                toindex = mapping[tostate]
                rates[(fromindex, toindex)] = rate
        #Inverse the keys and values in mapping to get a dictionary with indices and states.
        self.mapping = {value: key for key, value in list(mapping.items())}
        #Use the `rates` dictionary to fill a sparse dok matrix.
        D = dok_matrix((self.size,self.size))
        D.update(rates)
        return D.tocsr()
def getStateCode(self,state):
"""
Calculates the state code for a specific state or set of states.
We transform the states so that they are nonnegative and take an inner product.
The resulting number is unique because we use numeral system with a large enough base.
"""
return np.dot(state-self.minvalues,self.statecode)
def setStateCodes(self):
"""
Generates (sorted) codes for the states in the statespace
This is used to quickly identify which states occur after a transition/action
"""
#calculate the statespace and determine the minima and maxima each element in the state vector
statespace = self.statespace()
self.minvalues = np.amin(statespace,axis=0)
self.maxvalues = np.amax(statespace,axis=0)
#calculate the largest number of values and create a state code
statesize = statespace.shape[1]
largestRange = 1+np.max(self.maxvalues-self.minvalues)
self.statecode = np.power(largestRange, np.arange(statesize),dtype=int)
#Calculate the codes, sort them, and store them in self.codes
codes = self.getStateCode(statespace)
sorted_indices = np.argsort(codes)
self.codes = codes[sorted_indices]
if np.unique(self.codes).shape != self.codes.shape:
raise "Non-unique coding of states, results are unreliable"
#For the end results, it is useful to put the indices and corresponding states in a dictionary
mapping = OrderedDict()
for index,state in enumerate(statespace[sorted_indices]):
mapping[index] = state
self.mapping = mapping
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates
    def directInitialMatrix(self):
        """
        Build the full sparse transition matrix by evaluating the transition
        function on every state of the state space.

        First initializes state codes and the index->state mapping, then
        lazily evaluates :func:`transitionStates` per state and assembles the
        results into a coo matrix, converted to csr for fast arithmetic.

        NOTE(review): `imap` appears to be a module-level py2/py3 lazy-map
        compatibility alias -- TODO confirm it is defined at the file top.
        """
        #First initialize state codes and the mapping with states.
        self.setStateCodes()
        #For each state, calculate the indices of reached states and rates using the transition function.
        results = imap(self.transitionStates, self.mapping.values())
        #Simpler alternative that uses less memory.
        #Would be competitive if the conversion from dok to csr is faster.
        # D = dok_matrix((self.size,self.size),dtype=float)
        # for index,(col,rate) in enumerate(results):
        #     D.update({(index,c): r for c,r in zip(col,rate)})
        # return D.tocsr()
        #preallocate memory for the rows, cols and rates of the sparse matrix
        rows = np.empty(self.size,dtype=int)
        cols = np.empty(self.size,dtype=int)
        rates = np.empty(self.size,dtype=float)
        #now fill the arrays with the results, increasing their size if current memory is too small.
        right = 0
        for index,(col,rate) in enumerate(results): #more robust alternative: in izip(self.mapping.keys(),results)
            left = right
            right += len(col)
            if right >= len(cols):
                new_capacity = int(round(right * 1.5)) #increase the allocated memory if the vectors turn out to be too small.
                cols.resize(new_capacity)
                rates.resize(new_capacity)
                rows.resize(new_capacity)
            rows[left:right] = index #since states are sorted, the index indeed corresponds to the state.
            cols[left:right] = col
            rates[left:right] = rate
        #Place all data in a coo_matrix and convert to a csr_matrix for quick computations.
        return coo_matrix((rates[:right],(rows[:right],cols[:right])),shape=(self.size,self.size)).tocsr()
def convertToRateMatrix(self, Q):
"""
Converts the initial matrix to a rate matrix.
We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
"""
rowSums = Q.sum(axis=1).getA1()
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Q-Qdiag
def convertToProbabilityMatrix(self, Q):
"""
Converts the initial matrix to a probability matrix
We calculate P = I + Q/l, with l the largest diagonal element.
Even if Q is already a probability matrix, this step helps for numerical stability.
By adding a small probability on the diagonal (0.00001), periodicity can be prevented.
"""
rowSums = Q.sum(axis=1).getA1()
l = np.max(rowSums)*1.00001
diagonalElements = 1.-rowSums/l
idxRange = np.arange(Q.shape[0])
Qdiag = coo_matrix((diagonalElements,(idxRange,idxRange)),shape=Q.shape).tocsr()
return Qdiag+Q.multiply(1./l)
def assertSingleClass(self,P):
"""
Check whether the rate/probability matrix consists of a single connected class.
If this is not the case, the steady state distribution is not well defined.
"""
components, _ = csgraph.connected_components(P, directed=True, connection='weak')
assert components==1, "The Markov chain has %r communicating classes. Make sure there is a single communicating class." %components
def getTransitionMatrix(self,probabilities=True):
"""
If self.P has been given already, we will reuse it and convert it to a sparse csr matrix if needed.
Otherwise, we will generate it using the direct or indirect method.
Since most solution methods use a probability matrix, this is the default setting.
By setting probabilities=False we can also return a rate matrix.
"""
if self.P is not None:
if isspmatrix(self.P):
if not isspmatrix_csr(self.P):
self.P = self.P.tocsr()
else:
assert isinstance(self.P, np.ndarray) and self.P.ndim==2 and self.P.shape[0]==self.P.shape[1],'P needs to be a 2d numpy array with an equal number of columns and rows'
self.P = csr_matrix(self.P)
elif self.direct == True:
self.P = self.directInitialMatrix()
else:
self.P = self.indirectInitialMatrix(self.initialState)
if probabilities:
P = self.convertToProbabilityMatrix(self.P)
else:
P = self.convertToRateMatrix(self.P)
return P
def getIrreducibleTransitionMatrix(self,probabilities=True):
#Gets the transitionmatrix and assert that it consists of a single irreducible class.
P = self.getTransitionMatrix(probabilities=True)
self.assertSingleClass(P)
return P
def powerMethod(self, tol = 1e-8, maxiter = 1e5):
"""
Carry out the power method and store the result in the class attribute ``pi``.
Repeatedly takes the dot product between ``P`` and ``pi`` until the norm is smaller than the prespecified tolerance ``tol``.
Parameters
----------
tol : float, optional(default=1e-8)
Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``.
maxiter : int, optional(default=1e5)
The maximum number of power iterations to be carried out.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.powerMethod()
>>> print(mc.pi)
[ 0.54545455 0.45454545]
Remarks
-------
The power method is robust even when state space becomes large (more than 500.000 states), whereas the other methods may have some issues with memory or convergence.
The power method may converge slowly for Markov chains where states are rather disconnected. That is, when the expected time to go from one state to another is large.
"""
P = self.getIrreducibleTransitionMatrix().T #take transpose now to speed up dot product.
size = P.shape[0]
pi = np.zeros(size); pi1 = np.zeros(size)
pi[0] = 1;
n = norm(pi - pi1,1); i = 0;
while n > tol and i < maxiter:
pi1 = P.dot(pi)
pi = P.dot(pi1)
n = norm(pi - pi1,1); i += 1
self.pi = pi
def eigenMethod(self, tol=1e-8, maxiter=1e5):
    """Compute ``self.pi`` from the eigenvector of the rate matrix.

    Uses :func:`eigs` on the transposed rate matrix with a small shift
    (``sigma=1e-6``), starting from an all-ones initial guess. One- and
    two-state chains are solved in closed form instead.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance passed to the eigensolver.
    maxiter : int, optional(default=1e5)
        Iteration cap passed to the eigensolver.
    """
    Q = self.getIrreducibleTransitionMatrix(probabilities=False)
    if Q.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    if Q.shape == (2, 2):
        # Closed-form stationary distribution of a two-state chain.
        self.pi = np.array([Q[1, 0], Q[0, 1]] / (Q[0, 1] + Q[1, 0]))
        return
    n_states = Q.shape[0]
    start = np.ones(n_states, dtype=float)
    eigval, eigvec = eigs(Q.T, k=1, v0=start, sigma=1e-6, which='LM',
                          tol=tol, maxiter=maxiter)
    stationary = eigvec[:, 0].real
    stationary /= stationary.sum()
    self.pi = stationary
def linearMethod(self):
    """Solve the stationary equations exactly with a sparse linear solver.

    Replaces the first balance equation of ``pi (P - I) = 0`` with the
    normalization ``sum(pi) = 1`` and solves via :func:`spsolve`; the
    result is stored in ``self.pi``.
    """
    P = self.getIrreducibleTransitionMatrix()
    # A single absorbing state has the trivial distribution.
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    n_states = P.shape[0]
    balance = (P - eye(n_states)).T
    # Row 0 becomes the normalizing condition; rows 1.. keep the balance eqs.
    coeff = vstack([np.ones(n_states), balance[1:, :]]).tocsr()
    target = np.zeros((n_states,))
    target[0] = 1
    self.pi = spsolve(coeff, target)
def krylovMethod(self, tol=1e-8):
    """Solve the stationary equations with the GMRES Krylov-subspace solver.

    Builds the same normalized balance system as :func:`linearMethod` and
    hands it to :func:`gmres`; the result is stored in ``self.pi``.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance handed to the GMRES solver.

    Raises
    ------
    RuntimeError
        If GMRES reports non-convergence.
    """
    P = self.getIrreducibleTransitionMatrix()
    # A single absorbing state has the trivial distribution.
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    n_states = P.shape[0]
    balance = (P - eye(n_states)).T
    # Row 0 becomes the normalizing condition; rows 1.. keep the balance eqs.
    coeff = vstack([np.ones(n_states), balance[1:, :]]).tocsr()
    target = np.zeros((n_states,))
    target[0] = 1
    solution, status = gmres(coeff, target, tol=tol)
    if status != 0:
        raise RuntimeError("gmres did not converge")
    self.pi = solution
def computePi(self, method='power'):
    """Compute the stationary distribution with the chosen algorithm.

    Dispatches to one of :func:`powerMethod`, :func:`eigenMethod`,
    :func:`linearMethod` or :func:`krylovMethod`, which store the result
    in ``self.pi``.

    Parameters
    ----------
    method : string, optional(default='power')
        One of 'power', 'eigen', 'linear', 'krylov'.
    """
    methodSet = ['power', 'eigen', 'linear', 'krylov']
    assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
    solver = getattr(self, method + 'Method')
    return solver()
|
lionheart/django-pyodbc | django_pyodbc/operations.py | DatabaseOperations._get_sql_server_ver | python | def _get_sql_server_ver(self):
if self._ss_ver is not None:
return self._ss_ver
cur = self.connection.cursor()
ver_code = None
if not self.is_db2 and not self.is_openedge:
cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
ver_code = cur.fetchone()[0]
ver_code = int(ver_code.split('.')[0])
else:
ver_code = 0
if ver_code >= 11:
self._ss_ver = 2012
elif ver_code == 10:
self._ss_ver = 2008
elif ver_code == 9:
self._ss_ver = 2005
else:
self._ss_ver = 2000
return self._ss_ver | Returns the version of the SQL Server in use: | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/operations.py#L124-L146 | null | class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django_pyodbc.compiler"
def __init__(self, connection):
    """Store the connection and set up lazily-computed server metadata.

    Django >= 1.4 passes the connection to BaseDatabaseOperations'
    constructor; older versions take no argument, hence the dispatch.
    """
    if connection._DJANGO_VERSION >= 14:
        super(DatabaseOperations, self).__init__(connection)
    else:
        super(DatabaseOperations, self).__init__()
    self.connection = connection
    # Caches filled on first access (None means "not determined yet").
    self._ss_ver = None           # SQL Server version bucket (2000/2005/2008/2012)
    self._ss_edition = None       # engine edition code (detects Azure SQL DB)
    self._is_db2 = None
    self._is_openedge = None
    self._left_sql_quote = None   # identifier quote chars, resolved per backend
    self._right_sql_quote = None
@property
def is_db2(self):
    """True when the connection OPTIONS mark this backend as IBM DB2 (cached)."""
    if self._is_db2 is None:
        opts = self.connection.settings_dict.get('OPTIONS', {})
        self._is_db2 = opts.get('is_db2', False)
    return self._is_db2
@property
def is_openedge(self):
    """True when the connection OPTIONS mark this backend as OpenEdge (cached)."""
    if self._is_openedge is None:
        opts = self.connection.settings_dict.get('OPTIONS', {})
        self._is_openedge = opts.get('openedge', False)
    return self._is_openedge
@property
def left_sql_quote(self):
    """Opening identifier-quote character, resolved once and cached.

    Resolution order: explicit OPTIONS['left_sql_quote'], then the DB2 /
    OpenEdge conventions, finally the SQL Server bracket.
    """
    if self._left_sql_quote is None:
        opts = self.connection.settings_dict.get('OPTIONS', {})
        explicit = opts.get('left_sql_quote', None)
        if explicit is not None:
            self._left_sql_quote = explicit
        elif self.is_db2:
            self._left_sql_quote = '{'
        elif self.is_openedge:
            self._left_sql_quote = '"'
        else:
            self._left_sql_quote = '['
    return self._left_sql_quote
@property
def right_sql_quote(self):
    """Closing identifier-quote character, resolved once and cached.

    Resolution order mirrors ``left_sql_quote``: explicit OPTIONS value,
    then DB2 / OpenEdge conventions, finally the SQL Server bracket.
    """
    if self._right_sql_quote is None:
        opts = self.connection.settings_dict.get('OPTIONS', {})
        explicit = opts.get('right_sql_quote', None)
        if explicit is not None:
            self._right_sql_quote = explicit
        elif self.is_db2:
            self._right_sql_quote = '}'
        elif self.is_openedge:
            self._right_sql_quote = '"'
        else:
            self._right_sql_quote = ']'
    return self._right_sql_quote
sql_server_ver = property(_get_sql_server_ver)
def _on_azure_sql_db(self):
    """Return True when the server reports the Azure SQL Database edition.

    The engine edition code is fetched once via SERVERPROPERTY and cached
    on the instance.
    """
    if self._ss_edition is None:
        cursor = self.connection.cursor()
        cursor.execute("SELECT CAST(SERVERPROPERTY('EngineEdition') as integer)")
        self._ss_edition = cursor.fetchone()[0]
    return self._ss_edition == EDITION_AZURE_SQL_DB
on_azure_sql_db = property(_on_azure_sql_db)
def date_extract_sql(self, lookup_type, field_name):
    """Return DATEPART SQL extracting *lookup_type* from *field_name*.

    *lookup_type* is one of 'year', 'month', 'day' or 'week_day'; Django's
    'week_day' maps onto T-SQL's 'dw' datepart.
    """
    part = 'dw' if lookup_type == 'week_day' else lookup_type
    return "DATEPART(%s, %s)" % (part, field_name)
def date_trunc_sql(self, lookup_type, field_name):
    """Truncate *field_name* to *lookup_type* precision via the
    DATEADD/DATEDIFF-from-epoch idiom (epoch 0 is 1900-01-01)."""
    template = "DATEADD(%(unit)s, DATEDIFF(%(unit)s, 0, %(col)s), 0)"
    return template % {'unit': lookup_type, 'col': field_name}
def _switch_tz_offset_sql(self, field_name, tzname):
    """Return SQL applying the *tzname* UTC offset to the quoted column.

    With USE_TZ disabled the quoted column is returned untouched. The
    offset is taken from pytz for a fixed reference date (2000-01-01).

    Raises
    ------
    ImproperlyConfigured
        If USE_TZ is on but pytz is not installed.
    """
    field_name = self.quote_name(field_name)
    if not settings.USE_TZ:
        return field_name
    if pytz is None:
        from django.core.exceptions import ImproperlyConfigured
        raise ImproperlyConfigured("This query requires pytz, "
                                   "but it isn't installed.")
    offset = pytz.timezone(tzname).utcoffset(datetime.datetime(2000, 1, 1))
    # timedelta.total_seconds() does not exist on very old Pythons.
    if hasattr(offset, 'total_seconds'):
        seconds = offset.total_seconds()
    else:
        seconds = offset.days * 24 * 60 * 60 + offset.seconds
    hours, minutes = divmod(seconds // 60, 60)
    tz_suffix = "%+03d:%02d" % (hours, minutes)
    return "CAST(SWITCHOFFSET(TODATETIMEOFFSET(%s, '+00:00'), '%s') AS DATETIME2)" % (field_name, tz_suffix)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
    """Return ``(sql, params)`` truncating a datetime column.

    *lookup_type* is 'year', 'month', 'day', 'hour', 'minute' or
    'second'; the column is first shifted by the timezone offset.
    """
    field_name = self._switch_tz_offset_sql(field_name, tzname)
    if lookup_type in ('minute', 'second'):
        # Anchor DATEDIFF at Jan 1st of the value's own year so the
        # minute/second difference cannot overflow an int on large datasets.
        anchor = ("CONVERT(datetime2, CONVERT(char(4), {field_name}, 112)"
                  " + '0101', 112)").format(field_name=field_name)
    else:
        anchor = '0'  # 1900-01-01
    sql = "DATEADD({lookup}, DATEDIFF({lookup}, {reference_date}, {field_name}), {reference_date})".format(
        lookup=lookup_type,
        field_name=field_name,
        reference_date=anchor,
    )
    return sql, []
def field_cast_sql(self, db_type, internal_type=None):
    """SQL template (with a '%s' column placeholder) casting a column for WHERE.

    Only ntext columns on pre-2005 servers need an explicit cast to
    nvarchar; everything else passes through unchanged.
    """
    needs_cast = (self.sql_server_ver < 2005
                  and db_type and db_type.lower() == 'ntext')
    return 'CAST(%s as nvarchar)' if needs_cast else '%s'
def fulltext_search_sql(self, field_name):
    """WHERE-clause fragment for a full-text CONTAINS search.

    The returned string keeps a '%s' placeholder for the search term.
    """
    return 'CONTAINS(%s, %%s)' % field_name
def last_insert_id(self, cursor, table_name, pk_name):
    """Return the last identity value generated for *table_name*.

    Uses IDENT_CURRENT(table) rather than SCOPE_IDENTITY/@@IDENTITY:
    IDENT_CURRENT is scoped to the table (any session, any scope), which
    is what the upper layers ask for here. *pk_name* is accepted for API
    compatibility but IDENT_CURRENT does not need it. Note that
    table-level scoping is not safe under concurrent inserts by other
    sessions (see the MSDN docs on the three identity functions).
    """
    quoted = self.quote_name(table_name)
    cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [quoted])
    return cursor.fetchone()[0]
def fetch_returned_insert_id(self, cursor):
    """Return the identity produced by an INSERT ... OUTPUT statement.

    The OUTPUT clause leaves the new id as the single column of the
    cursor's pending row.
    """
    row = cursor.fetchone()
    return row[0]
def lookup_cast(self, lookup_type, internal_type=None):
    """Wrap case-insensitive lookups in UPPER(); everything else passes through."""
    case_insensitive = ('iexact', 'icontains', 'istartswith', 'iendswith')
    return "UPPER(%s)" if lookup_type in case_insensitive else "%s"
def max_name_length(self):
    """SQL Server limits identifiers (sysname) to 128 characters."""
    return 128
def quote_name(self, name):
    """Quote a table/index/column identifier unless it is already quoted."""
    lq, rq = self.left_sql_quote, self.right_sql_quote
    if name.startswith(lq) and name.endswith(rq):
        # Quoting once is enough; never double-quote.
        return name
    return lq + name + rq
def random_function_sql(self):
    """SQL expression yielding a random value (T-SQL RAND)."""
    return "RAND()"
def last_executed_query(self, cursor, sql, params):
    """Reconstruct the query last run by *cursor*.

    The raw *sql*/*params* arguments are deliberately ignored in favour of
    the values the cursor recorded (``cursor.last_sql`` /
    ``cursor.last_params``), which reflect the statement actually sent to
    the server after this backend's quoting/mangling.
    """
    return super(DatabaseOperations, self).last_executed_query(
        cursor, cursor.last_sql, cursor.last_params)
def savepoint_create_sql(self, sid):
    """T-SQL opening savepoint *sid* (needed when uses_savepoints is True)."""
    return "SAVE TRANSACTION %s" % sid
def savepoint_commit_sql(self, sid):
    """T-SQL committing savepoint *sid*."""
    return "COMMIT TRANSACTION %s" % sid
def savepoint_rollback_sql(self, sid):
    """T-SQL rolling back to savepoint *sid*."""
    return "ROLLBACK TRANSACTION %s" % sid
def sql_flush(self, style, tables, sequences, allow_cascade=False):
    """
    Returns a list of SQL statements required to remove all data from
    the given database tables (without actually removing the tables
    themselves).

    The `style` argument is a Style object as returned by either
    color_style() or no_style() in django.core.management.color.
    """
    if tables:
        # Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY
        # So must use the much slower DELETE
        from django.db import connections
        cursor = connections[self.connection.alias].cursor()
        # Try to minimize the risks of the braindeaded inconsistency in
        # DBCC CHEKIDENT(table, RESEED, n) behavior: the reseed base is
        # chosen per table depending on whether it currently holds rows.
        seqs = []
        for seq in sequences:
            cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
            rowcnt = cursor.fetchone()[0]
            elem = {}
            if rowcnt:
                elem['start_id'] = 0
            else:
                elem['start_id'] = 1
            elem.update(seq)
            seqs.append(elem)
        # Fetch every non-PK/UNIQUE constraint so FKs can be disabled around
        # the DELETEs and re-enabled afterwards.
        cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
        fks = cursor.fetchall()
        sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
                (self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks]
        sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
                         style.SQL_FIELD(self.quote_name(table)) ) for table in tables])
        if self.on_azure_sql_db:
            # Azure SQL Database does not allow DBCC CHECKIDENT.
            import warnings
            warnings.warn("The identity columns will never be reset " \
                          "on Windows Azure SQL Database.",
                          RuntimeWarning)
        else:
            # Then reset the counters on each table.
            sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
                style.SQL_KEYWORD('DBCC'),
                style.SQL_KEYWORD('CHECKIDENT'),
                style.SQL_FIELD(self.quote_name(seq["table"])),
                style.SQL_KEYWORD('RESEED'),
                style.SQL_FIELD('%d' % seq['start_id']),
                style.SQL_KEYWORD('WITH'),
                style.SQL_KEYWORD('NO_INFOMSGS'),
                ) for seq in seqs])
        # Re-enable the constraints disabled above.
        sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
                (self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks])
        return sql_list
    else:
        return []
#def sequence_reset_sql(self, style, model_list):
# """
# Returns a list of the SQL statements required to reset sequences for
# the given models.
#
# The `style` argument is a Style object as returned by either
# color_style() or no_style() in django.core.management.color.
# """
# from django.db import models
# output = []
# for model in model_list:
# for f in model._meta.local_fields:
# if isinstance(f, models.AutoField):
# output.append(...)
# break # Only one AutoField is allowed per model, so don't bother continuing.
# for f in model._meta.many_to_many:
# output.append(...)
# return output
def start_transaction_sql(self):
    """T-SQL statement that opens a transaction."""
    return "BEGIN TRANSACTION"
def sql_for_tablespace(self, tablespace, inline=False):
    """SQL suffix placing an object in *tablespace* (a SQL Server filegroup)."""
    return "ON %s" % self.quote_name(tablespace)
def prep_for_like_query(self, x):
    """Escape LIKE wildcards and backslashes in *x*.

    Per http://msdn2.microsoft.com/en-us/library/ms179859.aspx, T-SQL
    escapes LIKE metacharacters with bracket classes. Backslashes are
    doubled first, then '[' before '%'/'_' so the brackets introduced by
    later escapes are not themselves re-escaped.
    """
    escaped = smart_text(x).replace('\\', '\\\\')
    for ch, repl in (('[', '[[]'), ('%', '[%]'), ('_', '[_]')):
        escaped = escaped.replace(ch, repl)
    return escaped
def prep_for_iexact_query(self, x):
    """Return *x* unchanged: 'iexact' is handled via UPPER() in lookup_cast,
    not via LIKE, so no escaping is needed here."""
    return x
def adapt_datetimefield_value(self, value):
    """
    Transform a datetime value to an object compatible with what is expected
    by the backend driver for datetime columns.
    """
    if value is None:
        return None
    if self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ:
        if timezone.is_aware(value):
            # pyodbc doesn't support datetimeoffset, so normalize aware
            # datetimes to UTC before handing them to the driver.
            value = value.astimezone(timezone.utc)
    # NOTE(review): the flattened source leaves it ambiguous whether this
    # microsecond trim sits inside the USE_TZ branch; placed at top level
    # since driver precision is independent of timezone support — confirm
    # against upstream django-pyodbc.
    if not self.connection.features.supports_microsecond_precision:
        value = value.replace(microsecond=0)
    return value
def adapt_timefield_value(self, value):
    """Convert a time value into the datetime object the driver expects.

    Legacy SQL Server has no separate time type, so times are anchored at
    1900-01-01; microseconds are dropped (not supported by the server).
    """
    if value is None:
        return None
    if isinstance(value, string_types):
        parsed = time.strptime(value, '%H:%M:%S')
        return datetime.datetime(*parsed[:6])
    return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second)
def year_lookup_bounds(self, value):
    """Inclusive [lower, upper] datetime strings for a BETWEEN year lookup.

    *value* is the looked-up year (int). The upper bound stops at whole
    seconds because SQL Server's datetime has no microsecond precision.
    """
    return ['%s-01-01 00:00:00' % value,
            '%s-12-31 23:59:59' % value]
def adapt_decimalfield_value(self, value, max_digits, decimal_places):
    """Format a decimal (or number) for the driver as a fixed-point string.

    Decimals are quantized to *decimal_places* under a context limited to
    *max_digits* significant digits; the resulting string carries one
    extra fractional digit, matching the historical behaviour of this
    backend.
    """
    if value is None:
        return None
    if not isinstance(value, decimal.Decimal):
        return "%.*f" % (decimal_places + 1, value)
    ctx = decimal.getcontext().copy()
    ctx.prec = max_digits
    quantum = decimal.Decimal(".1") ** decimal_places
    return "%.*f" % (decimal_places + 1, value.quantize(quantum, context=ctx))
def convert_values(self, value, field):
    """
    Coerce the value returned by the database backend into a consistent
    type that is compatible with the field type.

    In our case, cater for the fact that SQL Server < 2008 has no
    separate Date and Time data types.
    TODO: See how we'll handle this for SQL Server >= 2008
    """
    if value is None:
        return None
    if field and field.get_internal_type() == 'DateTimeField':
        # Drivers may return datetimes as strings; parse them.
        if isinstance(value, string_types) and value:
            value = parse_datetime(value)
        return value
    elif field and field.get_internal_type() == 'DateField':
        if isinstance(value, datetime.datetime):
            value = value.date()  # extract date
        elif isinstance(value, string_types):
            value = parse_date(value)
    elif field and field.get_internal_type() == 'TimeField':
        # Times come back anchored at 1900-01-01 (see adapt_timefield_value).
        if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
            value = value.time()  # extract time
        elif isinstance(value, string_types):
            # If the value is a string, parse it using parse_time.
            value = parse_time(value)
    # Some cases (for example when select_related() is used) aren't
    # caught by the DateField case above and date fields arrive from
    # the DB as datetime instances.
    # Implement a workaround stealing the idea from the Oracle
    # backend. It's not perfect so the same warning applies (i.e. if a
    # query results in valid date+time values with the time part set
    # to midnight, this workaround can surprise us by converting them
    # to the datetime.date Python type).
    # NOTE(review): nesting reconstructed from the flattened source; this
    # elif is placed on the outer chain per the comment above — confirm
    # against upstream django-pyodbc.
    elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
        value = value.date()
    # Force floats to the correct type
    elif value is not None and field and field.get_internal_type() == 'FloatField':
        value = float(value)
    return value
def return_insert_id(self):
    """No-op stand-in for the RETURNING extension (handled via OUTPUT).

    MSSQL implements RETURNING through an OUTPUT clause added by the
    SQLInsertCompiler, so there is nothing to append here. Pre-1.5 Django
    insists on appending a fragment to the INSERT, hence the harmless SQL
    comment placeholder; Django 1.5+ accepts (None, None) to bypass the
    mangling entirely (Django #19096).
    """
    if self.connection._DJANGO_VERSION < 15:
        return ('/* %s */', '')
    return (None, None)
|
lionheart/django-pyodbc | django_pyodbc/operations.py | DatabaseOperations._switch_tz_offset_sql | python | def _switch_tz_offset_sql(self, field_name, tzname):
field_name = self.quote_name(field_name)
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
tz = pytz.timezone(tzname)
td = tz.utcoffset(datetime.datetime(2000, 1, 1))
def total_seconds(td):
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return td.days * 24 * 60 * 60 + td.seconds
total_minutes = total_seconds(td) // 60
hours, minutes = divmod(total_minutes, 60)
tzoffset = "%+03d:%02d" % (hours, minutes)
field_name = "CAST(SWITCHOFFSET(TODATETIMEOFFSET(%s, '+00:00'), '%s') AS DATETIME2)" % (field_name, tzoffset)
return field_name | Returns the SQL that will convert field_name to UTC from tzname. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/operations.py#L171-L194 | null | class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django_pyodbc.compiler"
def __init__(self, connection):
if connection._DJANGO_VERSION >= 14:
super(DatabaseOperations, self).__init__(connection)
else:
super(DatabaseOperations, self).__init__()
self.connection = connection
self._ss_ver = None
self._ss_edition = None
self._is_db2 = None
self._is_openedge = None
self._left_sql_quote = None
self._right_sql_quote = None
@property
def is_db2(self):
if self._is_db2 is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_db2 = options.get('is_db2', False)
return self._is_db2
@property
def is_openedge(self):
if self._is_openedge is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_openedge = options.get('openedge', False)
return self._is_openedge
@property
def left_sql_quote(self):
if self._left_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('left_sql_quote', None)
if q is not None:
self._left_sql_quote = q
elif self.is_db2:
self._left_sql_quote = '{'
elif self.is_openedge:
self._left_sql_quote = '"'
else:
self._left_sql_quote = '['
return self._left_sql_quote
@property
def right_sql_quote(self):
if self._right_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('right_sql_quote', None)
if q is not None:
self._right_sql_quote = q
elif self.is_db2:
self._right_sql_quote = '}'
elif self.is_openedge:
self._right_sql_quote = '"'
else:
self._right_sql_quote = ']'
return self._right_sql_quote
def _get_sql_server_ver(self):
"""
Returns the version of the SQL Server in use:
"""
if self._ss_ver is not None:
return self._ss_ver
cur = self.connection.cursor()
ver_code = None
if not self.is_db2 and not self.is_openedge:
cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
ver_code = cur.fetchone()[0]
ver_code = int(ver_code.split('.')[0])
else:
ver_code = 0
if ver_code >= 11:
self._ss_ver = 2012
elif ver_code == 10:
self._ss_ver = 2008
elif ver_code == 9:
self._ss_ver = 2005
else:
self._ss_ver = 2000
return self._ss_ver
sql_server_ver = property(_get_sql_server_ver)
def _on_azure_sql_db(self):
if self._ss_edition is not None:
return self._ss_edition == EDITION_AZURE_SQL_DB
cur = self.connection.cursor()
cur.execute("SELECT CAST(SERVERPROPERTY('EngineEdition') as integer)")
self._ss_edition = cur.fetchone()[0]
return self._ss_edition == EDITION_AZURE_SQL_DB
on_azure_sql_db = property(_on_azure_sql_db)
def date_extract_sql(self, lookup_type, field_name):
    """
    Given a lookup_type of 'year', 'month', 'day' or 'week_day', returns
    the SQL that extracts a value from the given date field field_name.
    """
    if lookup_type == 'week_day':
        # Django's 'week_day' maps onto T-SQL's 'dw' datepart.
        return "DATEPART(dw, %s)" % field_name
    else:
        return "DATEPART(%s, %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
return "DATEADD(%s, DATEDIFF(%s, 0, %s), 0)" % (lookup_type, lookup_type, field_name)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
field_name = self._switch_tz_offset_sql(field_name, tzname)
reference_date = '0' # 1900-01-01
if lookup_type in ['minute', 'second']:
# Prevent DATEDIFF overflow by using the first day of the year as
# the reference point. Only using for minute and second to avoid any
# potential performance hit for queries against very large datasets.
reference_date = "CONVERT(datetime2, CONVERT(char(4), {field_name}, 112) + '0101', 112)".format(
field_name=field_name,
)
sql = "DATEADD({lookup}, DATEDIFF({lookup}, {reference_date}, {field_name}), {reference_date})".format(
lookup=lookup_type,
field_name=field_name,
reference_date=reference_date,
)
return sql, []
def field_cast_sql(self, db_type, internal_type=None):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
TODO: verify that db_type and internal_type do not affect T-SQL CAST statement
"""
if self.sql_server_ver < 2005 and db_type and db_type.lower() == 'ntext':
return 'CAST(%s as nvarchar)'
return '%s'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
return 'CONTAINS(%s, %%s)' % field_name
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
# TODO: Check how the `last_insert_id` is being used in the upper layers
# in context of multithreaded access, compare with other backends
# IDENT_CURRENT: http://msdn2.microsoft.com/en-us/library/ms175098.aspx
# SCOPE_IDENTITY: http://msdn2.microsoft.com/en-us/library/ms190315.aspx
# @@IDENTITY: http://msdn2.microsoft.com/en-us/library/ms187342.aspx
# IDENT_CURRENT is not limited by scope and session; it is limited to
# a specified table. IDENT_CURRENT returns the value generated for
# a specific table in any session and any scope.
# SCOPE_IDENTITY and @@IDENTITY return the last identity values that
# are generated in any table in the current session. However,
# SCOPE_IDENTITY returns values inserted only within the current scope;
# @@IDENTITY is not limited to a specific scope.
table_name = self.quote_name(table_name)
cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [table_name])
return cursor.fetchone()[0]
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT/OUTPUT statement
into a table that has an auto-incrementing ID, returns the newly created
ID.
"""
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_name_length(self):
return 128
def quote_name(self, name):
    """
    Returns a quoted version of the given table, index or column name. Does
    not quote the given name if it's already been quoted.
    """
    if name.startswith(self.left_sql_quote) and name.endswith(self.right_sql_quote):
        return name  # Quoting once is enough.
    return '%s%s%s' % (self.left_sql_quote, name, self.right_sql_quote)
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return "RAND()"
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params)
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVE TRANSACTION %s" % sid
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "COMMIT TRANSACTION %s" % sid
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TRANSACTION %s" % sid
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
if tables:
# Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY
# So must use the much slower DELETE
from django.db import connections
cursor = connections[self.connection.alias].cursor()
# Try to minimize the risks of the braindeaded inconsistency in
# DBCC CHEKIDENT(table, RESEED, n) behavior.
seqs = []
for seq in sequences:
cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
rowcnt = cursor.fetchone()[0]
elem = {}
if rowcnt:
elem['start_id'] = 0
else:
elem['start_id'] = 1
elem.update(seq)
seqs.append(elem)
cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
fks = cursor.fetchall()
sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks]
sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)) ) for table in tables])
if self.on_azure_sql_db:
import warnings
warnings.warn("The identity columns will never be reset " \
"on Windows Azure SQL Database.",
RuntimeWarning)
else:
# Then reset the counters on each table.
sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
style.SQL_KEYWORD('DBCC'),
style.SQL_KEYWORD('CHECKIDENT'),
style.SQL_FIELD(self.quote_name(seq["table"])),
style.SQL_KEYWORD('RESEED'),
style.SQL_FIELD('%d' % seq['start_id']),
style.SQL_KEYWORD('WITH'),
style.SQL_KEYWORD('NO_INFOMSGS'),
) for seq in seqs])
sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks])
return sql_list
else:
return []
#def sequence_reset_sql(self, style, model_list):
# """
# Returns a list of the SQL statements required to reset sequences for
# the given models.
#
# The `style` argument is a Style object as returned by either
# color_style() or no_style() in django.core.management.color.
# """
# from django.db import models
# output = []
# for model in model_list:
# for f in model._meta.local_fields:
# if isinstance(f, models.AutoField):
# output.append(...)
# break # Only one AutoField is allowed per model, so don't bother continuing.
# for f in model._meta.many_to_many:
# output.append(...)
# return output
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN TRANSACTION"
def sql_for_tablespace(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return "ON %s" % self.quote_name(tablespace)
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
# http://msdn2.microsoft.com/en-us/library/ms179859.aspx
return smart_text(x).replace('\\', '\\\\').replace('[', '[[]').replace('%', '[%]').replace('_', '[_]')
def prep_for_iexact_query(self, x):
"""
Same as prep_for_like_query(), but called for "iexact" matches, which
need not necessarily be implemented using "LIKE" in the backend.
"""
return x
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
if self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ:
if timezone.is_aware(value):
# pyodbc doesn't support datetimeoffset
value = value.astimezone(timezone.utc)
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return value
def adapt_timefield_value(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
# SQL Server doesn't support microseconds
if isinstance(value, string_types):
return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second)
def year_lookup_bounds(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
# SQL Server doesn't support microseconds
last = '%s-12-31 23:59:59'
return [first % value, last % value]
def adapt_decimalfield_value(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
#context.rounding = ROUND_FLOOR
return "%.*f" % (decimal_places + 1, value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return "%.*f" % (decimal_places + 1, value)
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
"""
if value is None:
return None
if field and field.get_internal_type() == 'DateTimeField':
if isinstance(value, string_types) and value:
value = parse_datetime(value)
return value
elif field and field.get_internal_type() == 'DateField':
if isinstance(value, datetime.datetime):
value = value.date() # extract date
elif isinstance(value, string_types):
value = parse_date(value)
elif field and field.get_internal_type() == 'TimeField':
if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
value = value.time() # extract time
elif isinstance(value, string_types):
# If the value is a string, parse it using parse_time.
value = parse_time(value)
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
return value
def return_insert_id(self):
"""
MSSQL implements the RETURNING SQL standard extension differently from
the core database backends and this function is essentially a no-op.
The SQL is altered in the SQLInsertCompiler to add the necessary OUTPUT
clause.
"""
if self.connection._DJANGO_VERSION < 15:
# This gets around inflexibility of SQLInsertCompiler's need to
# append an SQL fragment at the end of the insert query, which also must
# expect the full quoted table and column name.
return ('/* %s */', '')
# Django #19096 - As of Django 1.5, can return None, None to bypass the
# core's SQL mangling.
return (None, None)
|
lionheart/django-pyodbc | django_pyodbc/operations.py | DatabaseOperations.datetime_trunc_sql | python | def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name = self._switch_tz_offset_sql(field_name, tzname)
reference_date = '0' # 1900-01-01
if lookup_type in ['minute', 'second']:
# Prevent DATEDIFF overflow by using the first day of the year as
# the reference point. Only using for minute and second to avoid any
# potential performance hit for queries against very large datasets.
reference_date = "CONVERT(datetime2, CONVERT(char(4), {field_name}, 112) + '0101', 112)".format(
field_name=field_name,
)
sql = "DATEADD({lookup}, DATEDIFF({lookup}, {reference_date}, {field_name}), {reference_date})".format(
lookup=lookup_type,
field_name=field_name,
reference_date=reference_date,
)
return sql, [] | Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/operations.py#L196-L217 | null | class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django_pyodbc.compiler"
def __init__(self, connection):
if connection._DJANGO_VERSION >= 14:
super(DatabaseOperations, self).__init__(connection)
else:
super(DatabaseOperations, self).__init__()
self.connection = connection
self._ss_ver = None
self._ss_edition = None
self._is_db2 = None
self._is_openedge = None
self._left_sql_quote = None
self._right_sql_quote = None
@property
def is_db2(self):
if self._is_db2 is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_db2 = options.get('is_db2', False)
return self._is_db2
@property
def is_openedge(self):
if self._is_openedge is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_openedge = options.get('openedge', False)
return self._is_openedge
@property
def left_sql_quote(self):
if self._left_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('left_sql_quote', None)
if q is not None:
self._left_sql_quote = q
elif self.is_db2:
self._left_sql_quote = '{'
elif self.is_openedge:
self._left_sql_quote = '"'
else:
self._left_sql_quote = '['
return self._left_sql_quote
@property
def right_sql_quote(self):
if self._right_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('right_sql_quote', None)
if q is not None:
self._right_sql_quote = q
elif self.is_db2:
self._right_sql_quote = '}'
elif self.is_openedge:
self._right_sql_quote = '"'
else:
self._right_sql_quote = ']'
return self._right_sql_quote
def _get_sql_server_ver(self):
"""
Returns the version of the SQL Server in use:
"""
if self._ss_ver is not None:
return self._ss_ver
cur = self.connection.cursor()
ver_code = None
if not self.is_db2 and not self.is_openedge:
cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
ver_code = cur.fetchone()[0]
ver_code = int(ver_code.split('.')[0])
else:
ver_code = 0
if ver_code >= 11:
self._ss_ver = 2012
elif ver_code == 10:
self._ss_ver = 2008
elif ver_code == 9:
self._ss_ver = 2005
else:
self._ss_ver = 2000
return self._ss_ver
sql_server_ver = property(_get_sql_server_ver)
def _on_azure_sql_db(self):
if self._ss_edition is not None:
return self._ss_edition == EDITION_AZURE_SQL_DB
cur = self.connection.cursor()
cur.execute("SELECT CAST(SERVERPROPERTY('EngineEdition') as integer)")
self._ss_edition = cur.fetchone()[0]
return self._ss_edition == EDITION_AZURE_SQL_DB
on_azure_sql_db = property(_on_azure_sql_db)
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month', 'day' or 'week_day', returns
the SQL that extracts a value from the given date field field_name.
"""
if lookup_type == 'week_day':
return "DATEPART(dw, %s)" % field_name
else:
return "DATEPART(%s, %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
return "DATEADD(%s, DATEDIFF(%s, 0, %s), 0)" % (lookup_type, lookup_type, field_name)
def _switch_tz_offset_sql(self, field_name, tzname):
"""
Returns the SQL that will convert field_name to UTC from tzname.
"""
field_name = self.quote_name(field_name)
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
tz = pytz.timezone(tzname)
td = tz.utcoffset(datetime.datetime(2000, 1, 1))
def total_seconds(td):
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return td.days * 24 * 60 * 60 + td.seconds
total_minutes = total_seconds(td) // 60
hours, minutes = divmod(total_minutes, 60)
tzoffset = "%+03d:%02d" % (hours, minutes)
field_name = "CAST(SWITCHOFFSET(TODATETIMEOFFSET(%s, '+00:00'), '%s') AS DATETIME2)" % (field_name, tzoffset)
return field_name
def field_cast_sql(self, db_type, internal_type=None):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
TODO: verify that db_type and internal_type do not affect T-SQL CAST statement
"""
if self.sql_server_ver < 2005 and db_type and db_type.lower() == 'ntext':
return 'CAST(%s as nvarchar)'
return '%s'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
return 'CONTAINS(%s, %%s)' % field_name
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
# TODO: Check how the `last_insert_id` is being used in the upper layers
# in context of multithreaded access, compare with other backends
# IDENT_CURRENT: http://msdn2.microsoft.com/en-us/library/ms175098.aspx
# SCOPE_IDENTITY: http://msdn2.microsoft.com/en-us/library/ms190315.aspx
# @@IDENTITY: http://msdn2.microsoft.com/en-us/library/ms187342.aspx
# IDENT_CURRENT is not limited by scope and session; it is limited to
# a specified table. IDENT_CURRENT returns the value generated for
# a specific table in any session and any scope.
# SCOPE_IDENTITY and @@IDENTITY return the last identity values that
# are generated in any table in the current session. However,
# SCOPE_IDENTITY returns values inserted only within the current scope;
# @@IDENTITY is not limited to a specific scope.
table_name = self.quote_name(table_name)
cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [table_name])
return cursor.fetchone()[0]
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT/OUTPUT statement
into a table that has an auto-incrementing ID, returns the newly created
ID.
"""
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_name_length(self):
return 128
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
if name.startswith(self.left_sql_quote) and name.endswith(self.right_sql_quote):
return name # Quoting once is enough.
return '%s%s%s' % (self.left_sql_quote, name, self.right_sql_quote)
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return "RAND()"
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params)
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVE TRANSACTION %s" % sid
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "COMMIT TRANSACTION %s" % sid
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TRANSACTION %s" % sid
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
if tables:
# Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY
# So must use the much slower DELETE
from django.db import connections
cursor = connections[self.connection.alias].cursor()
# Try to minimize the risks of the braindead inconsistency in
# DBCC CHECKIDENT(table, RESEED, n) behavior.
seqs = []
for seq in sequences:
cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
rowcnt = cursor.fetchone()[0]
elem = {}
if rowcnt:
elem['start_id'] = 0
else:
elem['start_id'] = 1
elem.update(seq)
seqs.append(elem)
cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
fks = cursor.fetchall()
sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks]
sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)) ) for table in tables])
if self.on_azure_sql_db:
import warnings
warnings.warn("The identity columns will never be reset " \
"on Windows Azure SQL Database.",
RuntimeWarning)
else:
# Then reset the counters on each table.
sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
style.SQL_KEYWORD('DBCC'),
style.SQL_KEYWORD('CHECKIDENT'),
style.SQL_FIELD(self.quote_name(seq["table"])),
style.SQL_KEYWORD('RESEED'),
style.SQL_FIELD('%d' % seq['start_id']),
style.SQL_KEYWORD('WITH'),
style.SQL_KEYWORD('NO_INFOMSGS'),
) for seq in seqs])
sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks])
return sql_list
else:
return []
#def sequence_reset_sql(self, style, model_list):
# """
# Returns a list of the SQL statements required to reset sequences for
# the given models.
#
# The `style` argument is a Style object as returned by either
# color_style() or no_style() in django.core.management.color.
# """
# from django.db import models
# output = []
# for model in model_list:
# for f in model._meta.local_fields:
# if isinstance(f, models.AutoField):
# output.append(...)
# break # Only one AutoField is allowed per model, so don't bother continuing.
# for f in model._meta.many_to_many:
# output.append(...)
# return output
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN TRANSACTION"
def sql_for_tablespace(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return "ON %s" % self.quote_name(tablespace)
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
# http://msdn2.microsoft.com/en-us/library/ms179859.aspx
return smart_text(x).replace('\\', '\\\\').replace('[', '[[]').replace('%', '[%]').replace('_', '[_]')
def prep_for_iexact_query(self, x):
"""
Same as prep_for_like_query(), but called for "iexact" matches, which
need not necessarily be implemented using "LIKE" in the backend.
"""
return x
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
if self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ:
if timezone.is_aware(value):
# pyodbc doesn't support datetimeoffset
value = value.astimezone(timezone.utc)
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return value
def adapt_timefield_value(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
# SQL Server doesn't support microseconds
if isinstance(value, string_types):
return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second)
def year_lookup_bounds(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
# SQL Server doesn't support microseconds
last = '%s-12-31 23:59:59'
return [first % value, last % value]
def adapt_decimalfield_value(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
#context.rounding = ROUND_FLOOR
return "%.*f" % (decimal_places + 1, value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return "%.*f" % (decimal_places + 1, value)
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
"""
if value is None:
return None
if field and field.get_internal_type() == 'DateTimeField':
if isinstance(value, string_types) and value:
value = parse_datetime(value)
return value
elif field and field.get_internal_type() == 'DateField':
if isinstance(value, datetime.datetime):
value = value.date() # extract date
elif isinstance(value, string_types):
value = parse_date(value)
elif field and field.get_internal_type() == 'TimeField':
if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
value = value.time() # extract time
elif isinstance(value, string_types):
# If the value is a string, parse it using parse_time.
value = parse_time(value)
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
return value
def return_insert_id(self):
"""
MSSQL implements the RETURNING SQL standard extension differently from
the core database backends and this function is essentially a no-op.
The SQL is altered in the SQLInsertCompiler to add the necessary OUTPUT
clause.
"""
if self.connection._DJANGO_VERSION < 15:
# This gets around inflexibility of SQLInsertCompiler's need to
# append an SQL fragment at the end of the insert query, which also must
# expect the full quoted table and column name.
return ('/* %s */', '')
# Django #19096 - As of Django 1.5, can return None, None to bypass the
# core's SQL mangling.
return (None, None)
|
lionheart/django-pyodbc | django_pyodbc/operations.py | DatabaseOperations.last_insert_id | python | def last_insert_id(self, cursor, table_name, pk_name):
# TODO: Check how the `last_insert_id` is being used in the upper layers
# in context of multithreaded access, compare with other backends
# IDENT_CURRENT: http://msdn2.microsoft.com/en-us/library/ms175098.aspx
# SCOPE_IDENTITY: http://msdn2.microsoft.com/en-us/library/ms190315.aspx
# @@IDENTITY: http://msdn2.microsoft.com/en-us/library/ms187342.aspx
# IDENT_CURRENT is not limited by scope and session; it is limited to
# a specified table. IDENT_CURRENT returns the value generated for
# a specific table in any session and any scope.
# SCOPE_IDENTITY and @@IDENTITY return the last identity values that
# are generated in any table in the current session. However,
# SCOPE_IDENTITY returns values inserted only within the current scope;
# @@IDENTITY is not limited to a specific scope.
table_name = self.quote_name(table_name)
cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [table_name])
return cursor.fetchone()[0] | Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/operations.py#L240-L265 | null | class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django_pyodbc.compiler"
def __init__(self, connection):
if connection._DJANGO_VERSION >= 14:
super(DatabaseOperations, self).__init__(connection)
else:
super(DatabaseOperations, self).__init__()
self.connection = connection
self._ss_ver = None
self._ss_edition = None
self._is_db2 = None
self._is_openedge = None
self._left_sql_quote = None
self._right_sql_quote = None
@property
def is_db2(self):
if self._is_db2 is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_db2 = options.get('is_db2', False)
return self._is_db2
@property
def is_openedge(self):
if self._is_openedge is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_openedge = options.get('openedge', False)
return self._is_openedge
@property
def left_sql_quote(self):
if self._left_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('left_sql_quote', None)
if q is not None:
self._left_sql_quote = q
elif self.is_db2:
self._left_sql_quote = '{'
elif self.is_openedge:
self._left_sql_quote = '"'
else:
self._left_sql_quote = '['
return self._left_sql_quote
@property
def right_sql_quote(self):
if self._right_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('right_sql_quote', None)
if q is not None:
self._right_sql_quote = q
elif self.is_db2:
self._right_sql_quote = '}'
elif self.is_openedge:
self._right_sql_quote = '"'
else:
self._right_sql_quote = ']'
return self._right_sql_quote
def _get_sql_server_ver(self):
"""
Returns the version of the SQL Server in use:
"""
if self._ss_ver is not None:
return self._ss_ver
cur = self.connection.cursor()
ver_code = None
if not self.is_db2 and not self.is_openedge:
cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
ver_code = cur.fetchone()[0]
ver_code = int(ver_code.split('.')[0])
else:
ver_code = 0
if ver_code >= 11:
self._ss_ver = 2012
elif ver_code == 10:
self._ss_ver = 2008
elif ver_code == 9:
self._ss_ver = 2005
else:
self._ss_ver = 2000
return self._ss_ver
sql_server_ver = property(_get_sql_server_ver)
def _on_azure_sql_db(self):
if self._ss_edition is not None:
return self._ss_edition == EDITION_AZURE_SQL_DB
cur = self.connection.cursor()
cur.execute("SELECT CAST(SERVERPROPERTY('EngineEdition') as integer)")
self._ss_edition = cur.fetchone()[0]
return self._ss_edition == EDITION_AZURE_SQL_DB
on_azure_sql_db = property(_on_azure_sql_db)
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month', 'day' or 'week_day', returns
the SQL that extracts a value from the given date field field_name.
"""
if lookup_type == 'week_day':
return "DATEPART(dw, %s)" % field_name
else:
return "DATEPART(%s, %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
return "DATEADD(%s, DATEDIFF(%s, 0, %s), 0)" % (lookup_type, lookup_type, field_name)
def _switch_tz_offset_sql(self, field_name, tzname):
"""
Returns the SQL that will convert field_name to UTC from tzname.
"""
field_name = self.quote_name(field_name)
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
tz = pytz.timezone(tzname)
td = tz.utcoffset(datetime.datetime(2000, 1, 1))
def total_seconds(td):
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return td.days * 24 * 60 * 60 + td.seconds
total_minutes = total_seconds(td) // 60
hours, minutes = divmod(total_minutes, 60)
tzoffset = "%+03d:%02d" % (hours, minutes)
field_name = "CAST(SWITCHOFFSET(TODATETIMEOFFSET(%s, '+00:00'), '%s') AS DATETIME2)" % (field_name, tzoffset)
return field_name
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
field_name = self._switch_tz_offset_sql(field_name, tzname)
reference_date = '0' # 1900-01-01
if lookup_type in ['minute', 'second']:
# Prevent DATEDIFF overflow by using the first day of the year as
# the reference point. Only using for minute and second to avoid any
# potential performance hit for queries against very large datasets.
reference_date = "CONVERT(datetime2, CONVERT(char(4), {field_name}, 112) + '0101', 112)".format(
field_name=field_name,
)
sql = "DATEADD({lookup}, DATEDIFF({lookup}, {reference_date}, {field_name}), {reference_date})".format(
lookup=lookup_type,
field_name=field_name,
reference_date=reference_date,
)
return sql, []
def field_cast_sql(self, db_type, internal_type=None):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
TODO: verify that db_type and internal_type do not affect T-SQL CAST statement
"""
if self.sql_server_ver < 2005 and db_type and db_type.lower() == 'ntext':
return 'CAST(%s as nvarchar)'
return '%s'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
return 'CONTAINS(%s, %%s)' % field_name
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT/OUTPUT statement
into a table that has an auto-incrementing ID, returns the newly created
ID.
"""
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_name_length(self):
return 128
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
if name.startswith(self.left_sql_quote) and name.endswith(self.right_sql_quote):
return name # Quoting once is enough.
return '%s%s%s' % (self.left_sql_quote, name, self.right_sql_quote)
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return "RAND()"
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params)
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVE TRANSACTION %s" % sid
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "COMMIT TRANSACTION %s" % sid
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TRANSACTION %s" % sid
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
if tables:
# Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY
# So must use the much slower DELETE
from django.db import connections
cursor = connections[self.connection.alias].cursor()
# Try to minimize the risks of the braindead inconsistency in
# DBCC CHECKIDENT(table, RESEED, n) behavior.
seqs = []
for seq in sequences:
cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
rowcnt = cursor.fetchone()[0]
elem = {}
if rowcnt:
elem['start_id'] = 0
else:
elem['start_id'] = 1
elem.update(seq)
seqs.append(elem)
cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
fks = cursor.fetchall()
sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks]
sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)) ) for table in tables])
if self.on_azure_sql_db:
import warnings
warnings.warn("The identity columns will never be reset " \
"on Windows Azure SQL Database.",
RuntimeWarning)
else:
# Then reset the counters on each table.
sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
style.SQL_KEYWORD('DBCC'),
style.SQL_KEYWORD('CHECKIDENT'),
style.SQL_FIELD(self.quote_name(seq["table"])),
style.SQL_KEYWORD('RESEED'),
style.SQL_FIELD('%d' % seq['start_id']),
style.SQL_KEYWORD('WITH'),
style.SQL_KEYWORD('NO_INFOMSGS'),
) for seq in seqs])
sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks])
return sql_list
else:
return []
#def sequence_reset_sql(self, style, model_list):
# """
# Returns a list of the SQL statements required to reset sequences for
# the given models.
#
# The `style` argument is a Style object as returned by either
# color_style() or no_style() in django.core.management.color.
# """
# from django.db import models
# output = []
# for model in model_list:
# for f in model._meta.local_fields:
# if isinstance(f, models.AutoField):
# output.append(...)
# break # Only one AutoField is allowed per model, so don't bother continuing.
# for f in model._meta.many_to_many:
# output.append(...)
# return output
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN TRANSACTION"
def sql_for_tablespace(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return "ON %s" % self.quote_name(tablespace)
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
# http://msdn2.microsoft.com/en-us/library/ms179859.aspx
return smart_text(x).replace('\\', '\\\\').replace('[', '[[]').replace('%', '[%]').replace('_', '[_]')
def prep_for_iexact_query(self, x):
"""
Same as prep_for_like_query(), but called for "iexact" matches, which
need not necessarily be implemented using "LIKE" in the backend.
"""
return x
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
if self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ:
if timezone.is_aware(value):
# pyodbc donesn't support datetimeoffset
value = value.astimezone(timezone.utc)
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return value
def adapt_timefield_value(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
# SQL Server doesn't support microseconds
if isinstance(value, string_types):
return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second)
def year_lookup_bounds(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
# SQL Server doesn't support microseconds
last = '%s-12-31 23:59:59'
return [first % value, last % value]
def adapt_decimalfield_value(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
#context.rounding = ROUND_FLOOR
return "%.*f" % (decimal_places + 1, value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return "%.*f" % (decimal_places + 1, value)
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
"""
if value is None:
return None
if field and field.get_internal_type() == 'DateTimeField':
if isinstance(value, string_types) and value:
value = parse_datetime(value)
return value
elif field and field.get_internal_type() == 'DateField':
if isinstance(value, datetime.datetime):
value = value.date() # extract date
elif isinstance(value, string_types):
value = parse_date(value)
elif field and field.get_internal_type() == 'TimeField':
if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
value = value.time() # extract time
elif isinstance(value, string_types):
# If the value is a string, parse it using parse_time.
value = parse_time(value)
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
return value
def return_insert_id(self):
"""
MSSQL implements the RETURNING SQL standard extension differently from
the core database backends and this function is essentially a no-op.
The SQL is altered in the SQLInsertCompiler to add the necessary OUTPUT
clause.
"""
if self.connection._DJANGO_VERSION < 15:
# This gets around inflexibility of SQLInsertCompiler's need to
# append an SQL fragment at the end of the insert query, which also must
# expect the full quoted table and column name.
return ('/* %s */', '')
# Django #19096 - As of Django 1.5, can return None, None to bypass the
# core's SQL mangling.
return (None, None)
|
lionheart/django-pyodbc | django_pyodbc/operations.py | DatabaseOperations.quote_name | python | def quote_name(self, name):
if name.startswith(self.left_sql_quote) and name.endswith(self.right_sql_quote):
return name # Quoting once is enough.
return '%s%s%s' % (self.left_sql_quote, name, self.right_sql_quote) | Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/operations.py#L283-L290 | null | class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django_pyodbc.compiler"
def __init__(self, connection):
if connection._DJANGO_VERSION >= 14:
super(DatabaseOperations, self).__init__(connection)
else:
super(DatabaseOperations, self).__init__()
self.connection = connection
self._ss_ver = None
self._ss_edition = None
self._is_db2 = None
self._is_openedge = None
self._left_sql_quote = None
self._right_sql_quote = None
@property
def is_db2(self):
if self._is_db2 is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_db2 = options.get('is_db2', False)
return self._is_db2
@property
def is_openedge(self):
if self._is_openedge is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_openedge = options.get('openedge', False)
return self._is_openedge
@property
def left_sql_quote(self):
if self._left_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('left_sql_quote', None)
if q is not None:
self._left_sql_quote = q
elif self.is_db2:
self._left_sql_quote = '{'
elif self.is_openedge:
self._left_sql_quote = '"'
else:
self._left_sql_quote = '['
return self._left_sql_quote
@property
def right_sql_quote(self):
if self._right_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('right_sql_quote', None)
if q is not None:
self._right_sql_quote = q
elif self.is_db2:
self._right_sql_quote = '}'
elif self.is_openedge:
self._right_sql_quote = '"'
else:
self._right_sql_quote = ']'
return self._right_sql_quote
def _get_sql_server_ver(self):
"""
Returns the version of the SQL Server in use:
"""
if self._ss_ver is not None:
return self._ss_ver
cur = self.connection.cursor()
ver_code = None
if not self.is_db2 and not self.is_openedge:
cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
ver_code = cur.fetchone()[0]
ver_code = int(ver_code.split('.')[0])
else:
ver_code = 0
if ver_code >= 11:
self._ss_ver = 2012
elif ver_code == 10:
self._ss_ver = 2008
elif ver_code == 9:
self._ss_ver = 2005
else:
self._ss_ver = 2000
return self._ss_ver
sql_server_ver = property(_get_sql_server_ver)
def _on_azure_sql_db(self):
if self._ss_edition is not None:
return self._ss_edition == EDITION_AZURE_SQL_DB
cur = self.connection.cursor()
cur.execute("SELECT CAST(SERVERPROPERTY('EngineEdition') as integer)")
self._ss_edition = cur.fetchone()[0]
return self._ss_edition == EDITION_AZURE_SQL_DB
on_azure_sql_db = property(_on_azure_sql_db)
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month', 'day' or 'week_day', returns
the SQL that extracts a value from the given date field field_name.
"""
if lookup_type == 'week_day':
return "DATEPART(dw, %s)" % field_name
else:
return "DATEPART(%s, %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
return "DATEADD(%s, DATEDIFF(%s, 0, %s), 0)" % (lookup_type, lookup_type, field_name)
def _switch_tz_offset_sql(self, field_name, tzname):
"""
Returns the SQL that will convert field_name to UTC from tzname.
"""
field_name = self.quote_name(field_name)
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
tz = pytz.timezone(tzname)
td = tz.utcoffset(datetime.datetime(2000, 1, 1))
def total_seconds(td):
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return td.days * 24 * 60 * 60 + td.seconds
total_minutes = total_seconds(td) // 60
hours, minutes = divmod(total_minutes, 60)
tzoffset = "%+03d:%02d" % (hours, minutes)
field_name = "CAST(SWITCHOFFSET(TODATETIMEOFFSET(%s, '+00:00'), '%s') AS DATETIME2)" % (field_name, tzoffset)
return field_name
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
field_name = self._switch_tz_offset_sql(field_name, tzname)
reference_date = '0' # 1900-01-01
if lookup_type in ['minute', 'second']:
# Prevent DATEDIFF overflow by using the first day of the year as
# the reference point. Only using for minute and second to avoid any
# potential performance hit for queries against very large datasets.
reference_date = "CONVERT(datetime2, CONVERT(char(4), {field_name}, 112) + '0101', 112)".format(
field_name=field_name,
)
sql = "DATEADD({lookup}, DATEDIFF({lookup}, {reference_date}, {field_name}), {reference_date})".format(
lookup=lookup_type,
field_name=field_name,
reference_date=reference_date,
)
return sql, []
def field_cast_sql(self, db_type, internal_type=None):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
TODO: verify that db_type and internal_type do not affect T-SQL CAST statement
"""
if self.sql_server_ver < 2005 and db_type and db_type.lower() == 'ntext':
return 'CAST(%s as nvarchar)'
return '%s'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
return 'CONTAINS(%s, %%s)' % field_name
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
# TODO: Check how the `last_insert_id` is being used in the upper layers
# in context of multithreaded access, compare with other backends
# IDENT_CURRENT: http://msdn2.microsoft.com/en-us/library/ms175098.aspx
# SCOPE_IDENTITY: http://msdn2.microsoft.com/en-us/library/ms190315.aspx
# @@IDENTITY: http://msdn2.microsoft.com/en-us/library/ms187342.aspx
# IDENT_CURRENT is not limited by scope and session; it is limited to
# a specified table. IDENT_CURRENT returns the value generated for
# a specific table in any session and any scope.
# SCOPE_IDENTITY and @@IDENTITY return the last identity values that
# are generated in any table in the current session. However,
# SCOPE_IDENTITY returns values inserted only within the current scope;
# @@IDENTITY is not limited to a specific scope.
table_name = self.quote_name(table_name)
cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [table_name])
return cursor.fetchone()[0]
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT/OUTPUT statement
into a table that has an auto-incrementing ID, returns the newly created
ID.
"""
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_name_length(self):
return 128
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return "RAND()"
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params)
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVE TRANSACTION %s" % sid
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "COMMIT TRANSACTION %s" % sid
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TRANSACTION %s" % sid
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
if tables:
# Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY
# So must use the much slower DELETE
from django.db import connections
cursor = connections[self.connection.alias].cursor()
# Try to minimize the risks of the braindeaded inconsistency in
# DBCC CHEKIDENT(table, RESEED, n) behavior.
seqs = []
for seq in sequences:
cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
rowcnt = cursor.fetchone()[0]
elem = {}
if rowcnt:
elem['start_id'] = 0
else:
elem['start_id'] = 1
elem.update(seq)
seqs.append(elem)
cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
fks = cursor.fetchall()
sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks]
sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)) ) for table in tables])
if self.on_azure_sql_db:
import warnings
warnings.warn("The identity columns will never be reset " \
"on Windows Azure SQL Database.",
RuntimeWarning)
else:
# Then reset the counters on each table.
sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
style.SQL_KEYWORD('DBCC'),
style.SQL_KEYWORD('CHECKIDENT'),
style.SQL_FIELD(self.quote_name(seq["table"])),
style.SQL_KEYWORD('RESEED'),
style.SQL_FIELD('%d' % seq['start_id']),
style.SQL_KEYWORD('WITH'),
style.SQL_KEYWORD('NO_INFOMSGS'),
) for seq in seqs])
sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks])
return sql_list
else:
return []
#def sequence_reset_sql(self, style, model_list):
# """
# Returns a list of the SQL statements required to reset sequences for
# the given models.
#
# The `style` argument is a Style object as returned by either
# color_style() or no_style() in django.core.management.color.
# """
# from django.db import models
# output = []
# for model in model_list:
# for f in model._meta.local_fields:
# if isinstance(f, models.AutoField):
# output.append(...)
# break # Only one AutoField is allowed per model, so don't bother continuing.
# for f in model._meta.many_to_many:
# output.append(...)
# return output
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN TRANSACTION"
def sql_for_tablespace(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return "ON %s" % self.quote_name(tablespace)
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
# http://msdn2.microsoft.com/en-us/library/ms179859.aspx
return smart_text(x).replace('\\', '\\\\').replace('[', '[[]').replace('%', '[%]').replace('_', '[_]')
def prep_for_iexact_query(self, x):
"""
Same as prep_for_like_query(), but called for "iexact" matches, which
need not necessarily be implemented using "LIKE" in the backend.
"""
return x
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
if self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ:
if timezone.is_aware(value):
# pyodbc donesn't support datetimeoffset
value = value.astimezone(timezone.utc)
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return value
def adapt_timefield_value(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
# SQL Server doesn't support microseconds
if isinstance(value, string_types):
return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second)
def year_lookup_bounds(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
# SQL Server doesn't support microseconds
last = '%s-12-31 23:59:59'
return [first % value, last % value]
def adapt_decimalfield_value(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
#context.rounding = ROUND_FLOOR
return "%.*f" % (decimal_places + 1, value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return "%.*f" % (decimal_places + 1, value)
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
"""
if value is None:
return None
if field and field.get_internal_type() == 'DateTimeField':
if isinstance(value, string_types) and value:
value = parse_datetime(value)
return value
elif field and field.get_internal_type() == 'DateField':
if isinstance(value, datetime.datetime):
value = value.date() # extract date
elif isinstance(value, string_types):
value = parse_date(value)
elif field and field.get_internal_type() == 'TimeField':
if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
value = value.time() # extract time
elif isinstance(value, string_types):
# If the value is a string, parse it using parse_time.
value = parse_time(value)
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
return value
def return_insert_id(self):
"""
MSSQL implements the RETURNING SQL standard extension differently from
the core database backends and this function is essentially a no-op.
The SQL is altered in the SQLInsertCompiler to add the necessary OUTPUT
clause.
"""
if self.connection._DJANGO_VERSION < 15:
# This gets around inflexibility of SQLInsertCompiler's need to
# append an SQL fragment at the end of the insert query, which also must
# expect the full quoted table and column name.
return ('/* %s */', '')
# Django #19096 - As of Django 1.5, can return None, None to bypass the
# core's SQL mangling.
return (None, None)
|
lionheart/django-pyodbc | django_pyodbc/operations.py | DatabaseOperations.last_executed_query | python | def last_executed_query(self, cursor, sql, params):
return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params) | Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/operations.py#L298-L308 | null | class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django_pyodbc.compiler"
def __init__(self, connection):
if connection._DJANGO_VERSION >= 14:
super(DatabaseOperations, self).__init__(connection)
else:
super(DatabaseOperations, self).__init__()
self.connection = connection
self._ss_ver = None
self._ss_edition = None
self._is_db2 = None
self._is_openedge = None
self._left_sql_quote = None
self._right_sql_quote = None
@property
def is_db2(self):
if self._is_db2 is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_db2 = options.get('is_db2', False)
return self._is_db2
@property
def is_openedge(self):
if self._is_openedge is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_openedge = options.get('openedge', False)
return self._is_openedge
@property
def left_sql_quote(self):
if self._left_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('left_sql_quote', None)
if q is not None:
self._left_sql_quote = q
elif self.is_db2:
self._left_sql_quote = '{'
elif self.is_openedge:
self._left_sql_quote = '"'
else:
self._left_sql_quote = '['
return self._left_sql_quote
@property
def right_sql_quote(self):
if self._right_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('right_sql_quote', None)
if q is not None:
self._right_sql_quote = q
elif self.is_db2:
self._right_sql_quote = '}'
elif self.is_openedge:
self._right_sql_quote = '"'
else:
self._right_sql_quote = ']'
return self._right_sql_quote
def _get_sql_server_ver(self):
"""
Returns the version of the SQL Server in use:
"""
if self._ss_ver is not None:
return self._ss_ver
cur = self.connection.cursor()
ver_code = None
if not self.is_db2 and not self.is_openedge:
cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
ver_code = cur.fetchone()[0]
ver_code = int(ver_code.split('.')[0])
else:
ver_code = 0
if ver_code >= 11:
self._ss_ver = 2012
elif ver_code == 10:
self._ss_ver = 2008
elif ver_code == 9:
self._ss_ver = 2005
else:
self._ss_ver = 2000
return self._ss_ver
sql_server_ver = property(_get_sql_server_ver)
def _on_azure_sql_db(self):
if self._ss_edition is not None:
return self._ss_edition == EDITION_AZURE_SQL_DB
cur = self.connection.cursor()
cur.execute("SELECT CAST(SERVERPROPERTY('EngineEdition') as integer)")
self._ss_edition = cur.fetchone()[0]
return self._ss_edition == EDITION_AZURE_SQL_DB
on_azure_sql_db = property(_on_azure_sql_db)
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month', 'day' or 'week_day', returns
the SQL that extracts a value from the given date field field_name.
"""
if lookup_type == 'week_day':
return "DATEPART(dw, %s)" % field_name
else:
return "DATEPART(%s, %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
return "DATEADD(%s, DATEDIFF(%s, 0, %s), 0)" % (lookup_type, lookup_type, field_name)
def _switch_tz_offset_sql(self, field_name, tzname):
"""
Returns the SQL that will convert field_name to UTC from tzname.
"""
field_name = self.quote_name(field_name)
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
tz = pytz.timezone(tzname)
td = tz.utcoffset(datetime.datetime(2000, 1, 1))
def total_seconds(td):
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return td.days * 24 * 60 * 60 + td.seconds
total_minutes = total_seconds(td) // 60
hours, minutes = divmod(total_minutes, 60)
tzoffset = "%+03d:%02d" % (hours, minutes)
field_name = "CAST(SWITCHOFFSET(TODATETIMEOFFSET(%s, '+00:00'), '%s') AS DATETIME2)" % (field_name, tzoffset)
return field_name
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
field_name = self._switch_tz_offset_sql(field_name, tzname)
reference_date = '0' # 1900-01-01
if lookup_type in ['minute', 'second']:
# Prevent DATEDIFF overflow by using the first day of the year as
# the reference point. Only using for minute and second to avoid any
# potential performance hit for queries against very large datasets.
reference_date = "CONVERT(datetime2, CONVERT(char(4), {field_name}, 112) + '0101', 112)".format(
field_name=field_name,
)
sql = "DATEADD({lookup}, DATEDIFF({lookup}, {reference_date}, {field_name}), {reference_date})".format(
lookup=lookup_type,
field_name=field_name,
reference_date=reference_date,
)
return sql, []
def field_cast_sql(self, db_type, internal_type=None):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
TODO: verify that db_type and internal_type do not affect T-SQL CAST statement
"""
if self.sql_server_ver < 2005 and db_type and db_type.lower() == 'ntext':
return 'CAST(%s as nvarchar)'
return '%s'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
return 'CONTAINS(%s, %%s)' % field_name
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
# TODO: Check how the `last_insert_id` is being used in the upper layers
# in context of multithreaded access, compare with other backends
# IDENT_CURRENT: http://msdn2.microsoft.com/en-us/library/ms175098.aspx
# SCOPE_IDENTITY: http://msdn2.microsoft.com/en-us/library/ms190315.aspx
# @@IDENTITY: http://msdn2.microsoft.com/en-us/library/ms187342.aspx
# IDENT_CURRENT is not limited by scope and session; it is limited to
# a specified table. IDENT_CURRENT returns the value generated for
# a specific table in any session and any scope.
# SCOPE_IDENTITY and @@IDENTITY return the last identity values that
# are generated in any table in the current session. However,
# SCOPE_IDENTITY returns values inserted only within the current scope;
# @@IDENTITY is not limited to a specific scope.
table_name = self.quote_name(table_name)
cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [table_name])
return cursor.fetchone()[0]
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT/OUTPUT statement
into a table that has an auto-incrementing ID, returns the newly created
ID.
"""
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_name_length(self):
return 128
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
if name.startswith(self.left_sql_quote) and name.endswith(self.right_sql_quote):
return name # Quoting once is enough.
return '%s%s%s' % (self.left_sql_quote, name, self.right_sql_quote)
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return "RAND()"
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVE TRANSACTION %s" % sid
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "COMMIT TRANSACTION %s" % sid
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TRANSACTION %s" % sid
def sql_flush(self, style, tables, sequences, allow_cascade=False):
    """
    Returns a list of SQL statements required to remove all data from
    the given database tables (without actually removing the tables
    themselves).

    The `style` argument is a Style object as returned by either
    color_style() or no_style() in django.core.management.color.

    `allow_cascade` is accepted for interface compatibility but is not
    used by this backend.
    """
    if tables:
        # Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY,
        # so must use the much slower DELETE (with the FK constraints
        # disabled around the deletes below).
        from django.db import connections
        cursor = connections[self.connection.alias].cursor()
        # Try to minimize the risks of the braindead inconsistency in
        # DBCC CHECKIDENT(table, RESEED, n) behavior: reseed to 0 when the
        # table currently holds rows and to 1 when it is empty, so the
        # next generated identity value is 1 in both cases.
        seqs = []
        for seq in sequences:
            cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
            rowcnt = cursor.fetchone()[0]
            elem = {}
            if rowcnt:
                elem['start_id'] = 0
            else:
                elem['start_id'] = 1
            elem.update(seq)
            seqs.append(elem)
        # Collect every non-PK/non-UNIQUE constraint (i.e. the foreign
        # keys) so they can be disabled before the DELETEs and re-enabled
        # afterwards.
        cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
        fks = cursor.fetchall()
        sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
                (self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks]
        sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
                         style.SQL_FIELD(self.quote_name(table)) ) for table in tables])
        if self.on_azure_sql_db:
            # DBCC CHECKIDENT cannot be issued here, so identity columns
            # are simply left alone with a warning.
            import warnings
            warnings.warn("The identity columns will never be reset " \
                          "on Windows Azure SQL Database.",
                          RuntimeWarning)
        else:
            # Then reset the counters on each table.
            sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
                style.SQL_KEYWORD('DBCC'),
                style.SQL_KEYWORD('CHECKIDENT'),
                style.SQL_FIELD(self.quote_name(seq["table"])),
                style.SQL_KEYWORD('RESEED'),
                style.SQL_FIELD('%d' % seq['start_id']),
                style.SQL_KEYWORD('WITH'),
                style.SQL_KEYWORD('NO_INFOMSGS'),
            ) for seq in seqs])
        # Re-enable the FK constraints disabled above.
        sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
                (self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks])
        return sql_list
    else:
        return []
#def sequence_reset_sql(self, style, model_list):
# """
# Returns a list of the SQL statements required to reset sequences for
# the given models.
#
# The `style` argument is a Style object as returned by either
# color_style() or no_style() in django.core.management.color.
# """
# from django.db import models
# output = []
# for model in model_list:
# for f in model._meta.local_fields:
# if isinstance(f, models.AutoField):
# output.append(...)
# break # Only one AutoField is allowed per model, so don't bother continuing.
# for f in model._meta.many_to_many:
# output.append(...)
# return output
def start_transaction_sql(self):
    """Return the T-SQL statement that opens an explicit transaction."""
    statement = "BEGIN TRANSACTION"
    return statement
def sql_for_tablespace(self, tablespace, inline=False):
    """
    Return the SQL fragment appended to a table or row definition to
    place it in *tablespace*. (Never empty here: this backend always
    uses tablespaces.)

    `inline` is accepted for interface compatibility and ignored.
    """
    quoted = self.quote_name(tablespace)
    return "ON %s" % quoted
def prep_for_like_query(self, x):
    """
    Escape *x* for safe use inside a T-SQL LIKE pattern.

    Backslashes and the LIKE metacharacters are neutralised, per
    http://msdn2.microsoft.com/en-us/library/ms179859.aspx
    """
    escaped = smart_text(x)
    # The order of the passes mirrors the original chained .replace()
    # calls: backslash first, then each wildcard wrapped in [...].
    for raw, safe in (('\\', '\\\\'), ('[', '[[]'), ('%', '[%]'), ('_', '[_]')):
        escaped = escaped.replace(raw, safe)
    return escaped
def prep_for_iexact_query(self, x):
    """
    Prepare *x* for an "iexact" lookup.

    Unlike prep_for_like_query(), no escaping is applied: "iexact" need
    not be implemented with LIKE in this backend, so the value passes
    through unchanged.
    """
    return x
def adapt_datetimefield_value(self, value):
    """
    Transform a datetime value to an object compatible with what is
    expected by the backend driver for datetime columns.

    Aware datetimes are converted to naive UTC (pyodbc doesn't support
    datetimeoffset), and microseconds are stripped when the server
    cannot store them.
    """
    if value is None:
        return None
    # Short-circuit keeps settings/timezone untouched on Django < 1.4.
    if (self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ
            and timezone.is_aware(value)):
        value = value.astimezone(timezone.utc)
    if not self.connection.features.supports_microsecond_precision:
        value = value.replace(microsecond=0)
    return value
def adapt_timefield_value(self, value):
    """
    Transform a time value into the object the backend driver expects
    for time columns.

    The value is mapped onto a datetime anchored at 1900-01-01;
    microseconds are dropped because SQL Server doesn't support them.
    """
    if value is None:
        return None
    if isinstance(value, string_types):
        # Parse an 'HH:MM:SS' string into its calendar components.
        parsed = time.strptime(value, '%H:%M:%S')
        return datetime.datetime(*parsed[:6])
    return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second)
def year_lookup_bounds(self, value):
    """
    Return a two-element list [lower, upper] to be used with a BETWEEN
    operator when querying a field with a year lookup.

    *value* is an int holding the looked-up year. Bounds have second
    resolution only: SQL Server doesn't support microseconds.
    """
    templates = ('%s-01-01 00:00:00', '%s-12-31 23:59:59')
    return [tpl % value for tpl in templates]
def adapt_decimalfield_value(self, value, max_digits, decimal_places):
    """
    Transform a decimal.Decimal value into the string representation
    the backend driver expects for decimal (numeric) columns.

    Decimal instances are first quantized to *decimal_places* under a
    context limited to *max_digits* of precision; other numerics are
    formatted directly.

    NOTE(review): the result is rendered with ``decimal_places + 1``
    fractional digits — one more than quantized; behaviour preserved
    as-is, confirm whether the extra digit is intentional.
    """
    if value is None:
        return None
    frac_digits = decimal_places + 1
    if isinstance(value, decimal.Decimal):
        ctx = decimal.getcontext().copy()
        ctx.prec = max_digits
        quantum = decimal.Decimal(".1") ** decimal_places
        quantized = value.quantize(quantum, context=ctx)
        return "%.*f" % (frac_digits, quantized)
    return "%.*f" % (frac_digits, value)
def convert_values(self, value, field):
    """
    Coerce the value returned by the database backend into a consistent
    type that is compatible with the field type.

    In our case, cater for the fact that SQL Server < 2008 has no
    separate Date and Time data types.
    TODO: See how we'll handle this for SQL Server >= 2008
    """
    if value is None:
        return None
    if field and field.get_internal_type() == 'DateTimeField':
        # The driver may hand back an ISO string instead of a datetime.
        if isinstance(value, string_types) and value:
            value = parse_datetime(value)
        return value
    elif field and field.get_internal_type() == 'DateField':
        if isinstance(value, datetime.datetime):
            value = value.date() # extract date
        elif isinstance(value, string_types):
            value = parse_date(value)
    elif field and field.get_internal_type() == 'TimeField':
        # Times come back as datetimes anchored at 1900-01-01 (the
        # anchor used by adapt_timefield_value), so strip it back off.
        if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
            value = value.time() # extract time
        elif isinstance(value, string_types):
            # If the value is a string, parse it using parse_time.
            value = parse_time(value)
    # Some cases (for example when select_related() is used) aren't
    # caught by the DateField case above and date fields arrive from
    # the DB as datetime instances.
    # Implement a workaround stealing the idea from the Oracle
    # backend. It's not perfect so the same warning applies (i.e. if a
    # query results in valid date+time values with the time part set
    # to midnight, this workaround can surprise us by converting them
    # to the datetime.date Python type).
    elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
        value = value.date()
    # Force floats to the correct type
    elif value is not None and field and field.get_internal_type() == 'FloatField':
        value = float(value)
    return value
def return_insert_id(self):
    """
    MSSQL implements the RETURNING SQL standard extension differently
    from the core database backends; the necessary OUTPUT clause is
    added by SQLInsertCompiler instead, so this hook is essentially a
    no-op.
    """
    if self.connection._DJANGO_VERSION < 15:
        # Pre-1.5 SQLInsertCompiler insists on appending an SQL fragment
        # that expects the full quoted table and column name, so hand it
        # a harmless comment placeholder instead.
        return ('/* %s */', '')
    # Django #19096 - from 1.5 onwards, (None, None) bypasses the
    # core's SQL mangling entirely.
    return (None, None)
|
lionheart/django-pyodbc | django_pyodbc/operations.py | DatabaseOperations.sql_flush | python | def sql_flush(self, style, tables, sequences, allow_cascade=False):
if tables:
# Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY
# So must use the much slower DELETE
from django.db import connections
cursor = connections[self.connection.alias].cursor()
# Try to minimize the risks of the braindeaded inconsistency in
# DBCC CHEKIDENT(table, RESEED, n) behavior.
seqs = []
for seq in sequences:
cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
rowcnt = cursor.fetchone()[0]
elem = {}
if rowcnt:
elem['start_id'] = 0
else:
elem['start_id'] = 1
elem.update(seq)
seqs.append(elem)
cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
fks = cursor.fetchall()
sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks]
sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)) ) for table in tables])
if self.on_azure_sql_db:
import warnings
warnings.warn("The identity columns will never be reset " \
"on Windows Azure SQL Database.",
RuntimeWarning)
else:
# Then reset the counters on each table.
sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
style.SQL_KEYWORD('DBCC'),
style.SQL_KEYWORD('CHECKIDENT'),
style.SQL_FIELD(self.quote_name(seq["table"])),
style.SQL_KEYWORD('RESEED'),
style.SQL_FIELD('%d' % seq['start_id']),
style.SQL_KEYWORD('WITH'),
style.SQL_KEYWORD('NO_INFOMSGS'),
) for seq in seqs])
sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks])
return sql_list
else:
return [] | Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/operations.py#L330-L385 | null | class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django_pyodbc.compiler"
def __init__(self, connection):
if connection._DJANGO_VERSION >= 14:
super(DatabaseOperations, self).__init__(connection)
else:
super(DatabaseOperations, self).__init__()
self.connection = connection
self._ss_ver = None
self._ss_edition = None
self._is_db2 = None
self._is_openedge = None
self._left_sql_quote = None
self._right_sql_quote = None
@property
def is_db2(self):
if self._is_db2 is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_db2 = options.get('is_db2', False)
return self._is_db2
@property
def is_openedge(self):
if self._is_openedge is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_openedge = options.get('openedge', False)
return self._is_openedge
@property
def left_sql_quote(self):
if self._left_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('left_sql_quote', None)
if q is not None:
self._left_sql_quote = q
elif self.is_db2:
self._left_sql_quote = '{'
elif self.is_openedge:
self._left_sql_quote = '"'
else:
self._left_sql_quote = '['
return self._left_sql_quote
@property
def right_sql_quote(self):
if self._right_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('right_sql_quote', None)
if q is not None:
self._right_sql_quote = q
elif self.is_db2:
self._right_sql_quote = '}'
elif self.is_openedge:
self._right_sql_quote = '"'
else:
self._right_sql_quote = ']'
return self._right_sql_quote
def _get_sql_server_ver(self):
"""
Returns the version of the SQL Server in use:
"""
if self._ss_ver is not None:
return self._ss_ver
cur = self.connection.cursor()
ver_code = None
if not self.is_db2 and not self.is_openedge:
cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
ver_code = cur.fetchone()[0]
ver_code = int(ver_code.split('.')[0])
else:
ver_code = 0
if ver_code >= 11:
self._ss_ver = 2012
elif ver_code == 10:
self._ss_ver = 2008
elif ver_code == 9:
self._ss_ver = 2005
else:
self._ss_ver = 2000
return self._ss_ver
sql_server_ver = property(_get_sql_server_ver)
def _on_azure_sql_db(self):
if self._ss_edition is not None:
return self._ss_edition == EDITION_AZURE_SQL_DB
cur = self.connection.cursor()
cur.execute("SELECT CAST(SERVERPROPERTY('EngineEdition') as integer)")
self._ss_edition = cur.fetchone()[0]
return self._ss_edition == EDITION_AZURE_SQL_DB
on_azure_sql_db = property(_on_azure_sql_db)
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month', 'day' or 'week_day', returns
the SQL that extracts a value from the given date field field_name.
"""
if lookup_type == 'week_day':
return "DATEPART(dw, %s)" % field_name
else:
return "DATEPART(%s, %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
return "DATEADD(%s, DATEDIFF(%s, 0, %s), 0)" % (lookup_type, lookup_type, field_name)
def _switch_tz_offset_sql(self, field_name, tzname):
"""
Returns the SQL that will convert field_name to UTC from tzname.
"""
field_name = self.quote_name(field_name)
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
tz = pytz.timezone(tzname)
td = tz.utcoffset(datetime.datetime(2000, 1, 1))
def total_seconds(td):
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return td.days * 24 * 60 * 60 + td.seconds
total_minutes = total_seconds(td) // 60
hours, minutes = divmod(total_minutes, 60)
tzoffset = "%+03d:%02d" % (hours, minutes)
field_name = "CAST(SWITCHOFFSET(TODATETIMEOFFSET(%s, '+00:00'), '%s') AS DATETIME2)" % (field_name, tzoffset)
return field_name
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
field_name = self._switch_tz_offset_sql(field_name, tzname)
reference_date = '0' # 1900-01-01
if lookup_type in ['minute', 'second']:
# Prevent DATEDIFF overflow by using the first day of the year as
# the reference point. Only using for minute and second to avoid any
# potential performance hit for queries against very large datasets.
reference_date = "CONVERT(datetime2, CONVERT(char(4), {field_name}, 112) + '0101', 112)".format(
field_name=field_name,
)
sql = "DATEADD({lookup}, DATEDIFF({lookup}, {reference_date}, {field_name}), {reference_date})".format(
lookup=lookup_type,
field_name=field_name,
reference_date=reference_date,
)
return sql, []
def field_cast_sql(self, db_type, internal_type=None):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
TODO: verify that db_type and internal_type do not affect T-SQL CAST statement
"""
if self.sql_server_ver < 2005 and db_type and db_type.lower() == 'ntext':
return 'CAST(%s as nvarchar)'
return '%s'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
return 'CONTAINS(%s, %%s)' % field_name
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
# TODO: Check how the `last_insert_id` is being used in the upper layers
# in context of multithreaded access, compare with other backends
# IDENT_CURRENT: http://msdn2.microsoft.com/en-us/library/ms175098.aspx
# SCOPE_IDENTITY: http://msdn2.microsoft.com/en-us/library/ms190315.aspx
# @@IDENTITY: http://msdn2.microsoft.com/en-us/library/ms187342.aspx
# IDENT_CURRENT is not limited by scope and session; it is limited to
# a specified table. IDENT_CURRENT returns the value generated for
# a specific table in any session and any scope.
# SCOPE_IDENTITY and @@IDENTITY return the last identity values that
# are generated in any table in the current session. However,
# SCOPE_IDENTITY returns values inserted only within the current scope;
# @@IDENTITY is not limited to a specific scope.
table_name = self.quote_name(table_name)
cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [table_name])
return cursor.fetchone()[0]
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT/OUTPUT statement
into a table that has an auto-incrementing ID, returns the newly created
ID.
"""
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_name_length(self):
return 128
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
if name.startswith(self.left_sql_quote) and name.endswith(self.right_sql_quote):
return name # Quoting once is enough.
return '%s%s%s' % (self.left_sql_quote, name, self.right_sql_quote)
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return "RAND()"
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params)
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVE TRANSACTION %s" % sid
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "COMMIT TRANSACTION %s" % sid
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TRANSACTION %s" % sid
#def sequence_reset_sql(self, style, model_list):
# """
# Returns a list of the SQL statements required to reset sequences for
# the given models.
#
# The `style` argument is a Style object as returned by either
# color_style() or no_style() in django.core.management.color.
# """
# from django.db import models
# output = []
# for model in model_list:
# for f in model._meta.local_fields:
# if isinstance(f, models.AutoField):
# output.append(...)
# break # Only one AutoField is allowed per model, so don't bother continuing.
# for f in model._meta.many_to_many:
# output.append(...)
# return output
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN TRANSACTION"
def sql_for_tablespace(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return "ON %s" % self.quote_name(tablespace)
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
# http://msdn2.microsoft.com/en-us/library/ms179859.aspx
return smart_text(x).replace('\\', '\\\\').replace('[', '[[]').replace('%', '[%]').replace('_', '[_]')
def prep_for_iexact_query(self, x):
"""
Same as prep_for_like_query(), but called for "iexact" matches, which
need not necessarily be implemented using "LIKE" in the backend.
"""
return x
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
if self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ:
if timezone.is_aware(value):
# pyodbc donesn't support datetimeoffset
value = value.astimezone(timezone.utc)
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return value
def adapt_timefield_value(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
# SQL Server doesn't support microseconds
if isinstance(value, string_types):
return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second)
def year_lookup_bounds(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
# SQL Server doesn't support microseconds
last = '%s-12-31 23:59:59'
return [first % value, last % value]
def adapt_decimalfield_value(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
#context.rounding = ROUND_FLOOR
return "%.*f" % (decimal_places + 1, value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return "%.*f" % (decimal_places + 1, value)
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
"""
if value is None:
return None
if field and field.get_internal_type() == 'DateTimeField':
if isinstance(value, string_types) and value:
value = parse_datetime(value)
return value
elif field and field.get_internal_type() == 'DateField':
if isinstance(value, datetime.datetime):
value = value.date() # extract date
elif isinstance(value, string_types):
value = parse_date(value)
elif field and field.get_internal_type() == 'TimeField':
if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
value = value.time() # extract time
elif isinstance(value, string_types):
# If the value is a string, parse it using parse_time.
value = parse_time(value)
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
return value
def return_insert_id(self):
"""
MSSQL implements the RETURNING SQL standard extension differently from
the core database backends and this function is essentially a no-op.
The SQL is altered in the SQLInsertCompiler to add the necessary OUTPUT
clause.
"""
if self.connection._DJANGO_VERSION < 15:
# This gets around inflexibility of SQLInsertCompiler's need to
# append an SQL fragment at the end of the insert query, which also must
# expect the full quoted table and column name.
return ('/* %s */', '')
# Django #19096 - As of Django 1.5, can return None, None to bypass the
# core's SQL mangling.
return (None, None)
|
lionheart/django-pyodbc | django_pyodbc/operations.py | DatabaseOperations.adapt_datetimefield_value | python | def adapt_datetimefield_value(self, value):
if value is None:
return None
if self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ:
if timezone.is_aware(value):
# pyodbc donesn't support datetimeoffset
value = value.astimezone(timezone.utc)
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return value | Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/operations.py#L431-L444 | null | class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django_pyodbc.compiler"
def __init__(self, connection):
if connection._DJANGO_VERSION >= 14:
super(DatabaseOperations, self).__init__(connection)
else:
super(DatabaseOperations, self).__init__()
self.connection = connection
self._ss_ver = None
self._ss_edition = None
self._is_db2 = None
self._is_openedge = None
self._left_sql_quote = None
self._right_sql_quote = None
@property
def is_db2(self):
if self._is_db2 is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_db2 = options.get('is_db2', False)
return self._is_db2
@property
def is_openedge(self):
if self._is_openedge is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_openedge = options.get('openedge', False)
return self._is_openedge
@property
def left_sql_quote(self):
if self._left_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('left_sql_quote', None)
if q is not None:
self._left_sql_quote = q
elif self.is_db2:
self._left_sql_quote = '{'
elif self.is_openedge:
self._left_sql_quote = '"'
else:
self._left_sql_quote = '['
return self._left_sql_quote
@property
def right_sql_quote(self):
if self._right_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('right_sql_quote', None)
if q is not None:
self._right_sql_quote = q
elif self.is_db2:
self._right_sql_quote = '}'
elif self.is_openedge:
self._right_sql_quote = '"'
else:
self._right_sql_quote = ']'
return self._right_sql_quote
def _get_sql_server_ver(self):
"""
Returns the version of the SQL Server in use:
"""
if self._ss_ver is not None:
return self._ss_ver
cur = self.connection.cursor()
ver_code = None
if not self.is_db2 and not self.is_openedge:
cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
ver_code = cur.fetchone()[0]
ver_code = int(ver_code.split('.')[0])
else:
ver_code = 0
if ver_code >= 11:
self._ss_ver = 2012
elif ver_code == 10:
self._ss_ver = 2008
elif ver_code == 9:
self._ss_ver = 2005
else:
self._ss_ver = 2000
return self._ss_ver
sql_server_ver = property(_get_sql_server_ver)
def _on_azure_sql_db(self):
if self._ss_edition is not None:
return self._ss_edition == EDITION_AZURE_SQL_DB
cur = self.connection.cursor()
cur.execute("SELECT CAST(SERVERPROPERTY('EngineEdition') as integer)")
self._ss_edition = cur.fetchone()[0]
return self._ss_edition == EDITION_AZURE_SQL_DB
on_azure_sql_db = property(_on_azure_sql_db)
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month', 'day' or 'week_day', returns
the SQL that extracts a value from the given date field field_name.
"""
if lookup_type == 'week_day':
return "DATEPART(dw, %s)" % field_name
else:
return "DATEPART(%s, %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
return "DATEADD(%s, DATEDIFF(%s, 0, %s), 0)" % (lookup_type, lookup_type, field_name)
def _switch_tz_offset_sql(self, field_name, tzname):
"""
Returns the SQL that will convert field_name to UTC from tzname.
"""
field_name = self.quote_name(field_name)
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
tz = pytz.timezone(tzname)
td = tz.utcoffset(datetime.datetime(2000, 1, 1))
def total_seconds(td):
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return td.days * 24 * 60 * 60 + td.seconds
total_minutes = total_seconds(td) // 60
hours, minutes = divmod(total_minutes, 60)
tzoffset = "%+03d:%02d" % (hours, minutes)
field_name = "CAST(SWITCHOFFSET(TODATETIMEOFFSET(%s, '+00:00'), '%s') AS DATETIME2)" % (field_name, tzoffset)
return field_name
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
field_name = self._switch_tz_offset_sql(field_name, tzname)
reference_date = '0' # 1900-01-01
if lookup_type in ['minute', 'second']:
# Prevent DATEDIFF overflow by using the first day of the year as
# the reference point. Only using for minute and second to avoid any
# potential performance hit for queries against very large datasets.
reference_date = "CONVERT(datetime2, CONVERT(char(4), {field_name}, 112) + '0101', 112)".format(
field_name=field_name,
)
sql = "DATEADD({lookup}, DATEDIFF({lookup}, {reference_date}, {field_name}), {reference_date})".format(
lookup=lookup_type,
field_name=field_name,
reference_date=reference_date,
)
return sql, []
def field_cast_sql(self, db_type, internal_type=None):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
TODO: verify that db_type and internal_type do not affect T-SQL CAST statement
"""
if self.sql_server_ver < 2005 and db_type and db_type.lower() == 'ntext':
return 'CAST(%s as nvarchar)'
return '%s'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
return 'CONTAINS(%s, %%s)' % field_name
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
# TODO: Check how the `last_insert_id` is being used in the upper layers
# in context of multithreaded access, compare with other backends
# IDENT_CURRENT: http://msdn2.microsoft.com/en-us/library/ms175098.aspx
# SCOPE_IDENTITY: http://msdn2.microsoft.com/en-us/library/ms190315.aspx
# @@IDENTITY: http://msdn2.microsoft.com/en-us/library/ms187342.aspx
# IDENT_CURRENT is not limited by scope and session; it is limited to
# a specified table. IDENT_CURRENT returns the value generated for
# a specific table in any session and any scope.
# SCOPE_IDENTITY and @@IDENTITY return the last identity values that
# are generated in any table in the current session. However,
# SCOPE_IDENTITY returns values inserted only within the current scope;
# @@IDENTITY is not limited to a specific scope.
table_name = self.quote_name(table_name)
cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [table_name])
return cursor.fetchone()[0]
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT/OUTPUT statement
into a table that has an auto-incrementing ID, returns the newly created
ID.
"""
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_name_length(self):
return 128
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
if name.startswith(self.left_sql_quote) and name.endswith(self.right_sql_quote):
return name # Quoting once is enough.
return '%s%s%s' % (self.left_sql_quote, name, self.right_sql_quote)
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return "RAND()"
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params)
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVE TRANSACTION %s" % sid
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "COMMIT TRANSACTION %s" % sid
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TRANSACTION %s" % sid
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
if tables:
# Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY
# So must use the much slower DELETE
from django.db import connections
cursor = connections[self.connection.alias].cursor()
# Try to minimize the risks of the braindeaded inconsistency in
# DBCC CHEKIDENT(table, RESEED, n) behavior.
seqs = []
for seq in sequences:
cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
rowcnt = cursor.fetchone()[0]
elem = {}
if rowcnt:
elem['start_id'] = 0
else:
elem['start_id'] = 1
elem.update(seq)
seqs.append(elem)
cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
fks = cursor.fetchall()
sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks]
sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)) ) for table in tables])
if self.on_azure_sql_db:
import warnings
warnings.warn("The identity columns will never be reset " \
"on Windows Azure SQL Database.",
RuntimeWarning)
else:
# Then reset the counters on each table.
sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
style.SQL_KEYWORD('DBCC'),
style.SQL_KEYWORD('CHECKIDENT'),
style.SQL_FIELD(self.quote_name(seq["table"])),
style.SQL_KEYWORD('RESEED'),
style.SQL_FIELD('%d' % seq['start_id']),
style.SQL_KEYWORD('WITH'),
style.SQL_KEYWORD('NO_INFOMSGS'),
) for seq in seqs])
sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks])
return sql_list
else:
return []
#def sequence_reset_sql(self, style, model_list):
# """
# Returns a list of the SQL statements required to reset sequences for
# the given models.
#
# The `style` argument is a Style object as returned by either
# color_style() or no_style() in django.core.management.color.
# """
# from django.db import models
# output = []
# for model in model_list:
# for f in model._meta.local_fields:
# if isinstance(f, models.AutoField):
# output.append(...)
# break # Only one AutoField is allowed per model, so don't bother continuing.
# for f in model._meta.many_to_many:
# output.append(...)
# return output
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN TRANSACTION"
def sql_for_tablespace(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return "ON %s" % self.quote_name(tablespace)
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
# http://msdn2.microsoft.com/en-us/library/ms179859.aspx
return smart_text(x).replace('\\', '\\\\').replace('[', '[[]').replace('%', '[%]').replace('_', '[_]')
def prep_for_iexact_query(self, x):
"""
Same as prep_for_like_query(), but called for "iexact" matches, which
need not necessarily be implemented using "LIKE" in the backend.
"""
return x
def adapt_timefield_value(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
# SQL Server doesn't support microseconds
if isinstance(value, string_types):
return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second)
def year_lookup_bounds(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
# SQL Server doesn't support microseconds
last = '%s-12-31 23:59:59'
return [first % value, last % value]
def adapt_decimalfield_value(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
#context.rounding = ROUND_FLOOR
return "%.*f" % (decimal_places + 1, value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return "%.*f" % (decimal_places + 1, value)
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
"""
if value is None:
return None
if field and field.get_internal_type() == 'DateTimeField':
if isinstance(value, string_types) and value:
value = parse_datetime(value)
return value
elif field and field.get_internal_type() == 'DateField':
if isinstance(value, datetime.datetime):
value = value.date() # extract date
elif isinstance(value, string_types):
value = parse_date(value)
elif field and field.get_internal_type() == 'TimeField':
if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
value = value.time() # extract time
elif isinstance(value, string_types):
# If the value is a string, parse it using parse_time.
value = parse_time(value)
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
return value
def return_insert_id(self):
"""
MSSQL implements the RETURNING SQL standard extension differently from
the core database backends and this function is essentially a no-op.
The SQL is altered in the SQLInsertCompiler to add the necessary OUTPUT
clause.
"""
if self.connection._DJANGO_VERSION < 15:
# This gets around inflexibility of SQLInsertCompiler's need to
# append an SQL fragment at the end of the insert query, which also must
# expect the full quoted table and column name.
return ('/* %s */', '')
# Django #19096 - As of Django 1.5, can return None, None to bypass the
# core's SQL mangling.
return (None, None)
|
lionheart/django-pyodbc | django_pyodbc/operations.py | DatabaseOperations.adapt_timefield_value | python | def adapt_timefield_value(self, value):
if value is None:
return None
# SQL Server doesn't support microseconds
if isinstance(value, string_types):
return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second) | Transform a time value to an object compatible with what is expected
by the backend driver for time columns. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/operations.py#L446-L456 | null | class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django_pyodbc.compiler"
def __init__(self, connection):
if connection._DJANGO_VERSION >= 14:
super(DatabaseOperations, self).__init__(connection)
else:
super(DatabaseOperations, self).__init__()
self.connection = connection
self._ss_ver = None
self._ss_edition = None
self._is_db2 = None
self._is_openedge = None
self._left_sql_quote = None
self._right_sql_quote = None
@property
def is_db2(self):
if self._is_db2 is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_db2 = options.get('is_db2', False)
return self._is_db2
@property
def is_openedge(self):
if self._is_openedge is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_openedge = options.get('openedge', False)
return self._is_openedge
@property
def left_sql_quote(self):
if self._left_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('left_sql_quote', None)
if q is not None:
self._left_sql_quote = q
elif self.is_db2:
self._left_sql_quote = '{'
elif self.is_openedge:
self._left_sql_quote = '"'
else:
self._left_sql_quote = '['
return self._left_sql_quote
@property
def right_sql_quote(self):
if self._right_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('right_sql_quote', None)
if q is not None:
self._right_sql_quote = q
elif self.is_db2:
self._right_sql_quote = '}'
elif self.is_openedge:
self._right_sql_quote = '"'
else:
self._right_sql_quote = ']'
return self._right_sql_quote
def _get_sql_server_ver(self):
"""
Returns the version of the SQL Server in use:
"""
if self._ss_ver is not None:
return self._ss_ver
cur = self.connection.cursor()
ver_code = None
if not self.is_db2 and not self.is_openedge:
cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
ver_code = cur.fetchone()[0]
ver_code = int(ver_code.split('.')[0])
else:
ver_code = 0
if ver_code >= 11:
self._ss_ver = 2012
elif ver_code == 10:
self._ss_ver = 2008
elif ver_code == 9:
self._ss_ver = 2005
else:
self._ss_ver = 2000
return self._ss_ver
sql_server_ver = property(_get_sql_server_ver)
def _on_azure_sql_db(self):
if self._ss_edition is not None:
return self._ss_edition == EDITION_AZURE_SQL_DB
cur = self.connection.cursor()
cur.execute("SELECT CAST(SERVERPROPERTY('EngineEdition') as integer)")
self._ss_edition = cur.fetchone()[0]
return self._ss_edition == EDITION_AZURE_SQL_DB
on_azure_sql_db = property(_on_azure_sql_db)
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month', 'day' or 'week_day', returns
the SQL that extracts a value from the given date field field_name.
"""
if lookup_type == 'week_day':
return "DATEPART(dw, %s)" % field_name
else:
return "DATEPART(%s, %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
return "DATEADD(%s, DATEDIFF(%s, 0, %s), 0)" % (lookup_type, lookup_type, field_name)
def _switch_tz_offset_sql(self, field_name, tzname):
"""
Returns the SQL that will convert field_name to UTC from tzname.
"""
field_name = self.quote_name(field_name)
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
tz = pytz.timezone(tzname)
td = tz.utcoffset(datetime.datetime(2000, 1, 1))
def total_seconds(td):
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return td.days * 24 * 60 * 60 + td.seconds
total_minutes = total_seconds(td) // 60
hours, minutes = divmod(total_minutes, 60)
tzoffset = "%+03d:%02d" % (hours, minutes)
field_name = "CAST(SWITCHOFFSET(TODATETIMEOFFSET(%s, '+00:00'), '%s') AS DATETIME2)" % (field_name, tzoffset)
return field_name
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
field_name = self._switch_tz_offset_sql(field_name, tzname)
reference_date = '0' # 1900-01-01
if lookup_type in ['minute', 'second']:
# Prevent DATEDIFF overflow by using the first day of the year as
# the reference point. Only using for minute and second to avoid any
# potential performance hit for queries against very large datasets.
reference_date = "CONVERT(datetime2, CONVERT(char(4), {field_name}, 112) + '0101', 112)".format(
field_name=field_name,
)
sql = "DATEADD({lookup}, DATEDIFF({lookup}, {reference_date}, {field_name}), {reference_date})".format(
lookup=lookup_type,
field_name=field_name,
reference_date=reference_date,
)
return sql, []
def field_cast_sql(self, db_type, internal_type=None):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
TODO: verify that db_type and internal_type do not affect T-SQL CAST statement
"""
if self.sql_server_ver < 2005 and db_type and db_type.lower() == 'ntext':
return 'CAST(%s as nvarchar)'
return '%s'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
return 'CONTAINS(%s, %%s)' % field_name
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
# TODO: Check how the `last_insert_id` is being used in the upper layers
# in context of multithreaded access, compare with other backends
# IDENT_CURRENT: http://msdn2.microsoft.com/en-us/library/ms175098.aspx
# SCOPE_IDENTITY: http://msdn2.microsoft.com/en-us/library/ms190315.aspx
# @@IDENTITY: http://msdn2.microsoft.com/en-us/library/ms187342.aspx
# IDENT_CURRENT is not limited by scope and session; it is limited to
# a specified table. IDENT_CURRENT returns the value generated for
# a specific table in any session and any scope.
# SCOPE_IDENTITY and @@IDENTITY return the last identity values that
# are generated in any table in the current session. However,
# SCOPE_IDENTITY returns values inserted only within the current scope;
# @@IDENTITY is not limited to a specific scope.
table_name = self.quote_name(table_name)
cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [table_name])
return cursor.fetchone()[0]
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT/OUTPUT statement
into a table that has an auto-incrementing ID, returns the newly created
ID.
"""
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_name_length(self):
return 128
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
if name.startswith(self.left_sql_quote) and name.endswith(self.right_sql_quote):
return name # Quoting once is enough.
return '%s%s%s' % (self.left_sql_quote, name, self.right_sql_quote)
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return "RAND()"
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params)
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVE TRANSACTION %s" % sid
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "COMMIT TRANSACTION %s" % sid
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TRANSACTION %s" % sid
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
if tables:
# Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY
# So must use the much slower DELETE
from django.db import connections
cursor = connections[self.connection.alias].cursor()
# Try to minimize the risks of the braindeaded inconsistency in
# DBCC CHEKIDENT(table, RESEED, n) behavior.
seqs = []
for seq in sequences:
cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
rowcnt = cursor.fetchone()[0]
elem = {}
if rowcnt:
elem['start_id'] = 0
else:
elem['start_id'] = 1
elem.update(seq)
seqs.append(elem)
cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
fks = cursor.fetchall()
sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks]
sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)) ) for table in tables])
if self.on_azure_sql_db:
import warnings
warnings.warn("The identity columns will never be reset " \
"on Windows Azure SQL Database.",
RuntimeWarning)
else:
# Then reset the counters on each table.
sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
style.SQL_KEYWORD('DBCC'),
style.SQL_KEYWORD('CHECKIDENT'),
style.SQL_FIELD(self.quote_name(seq["table"])),
style.SQL_KEYWORD('RESEED'),
style.SQL_FIELD('%d' % seq['start_id']),
style.SQL_KEYWORD('WITH'),
style.SQL_KEYWORD('NO_INFOMSGS'),
) for seq in seqs])
sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks])
return sql_list
else:
return []
#def sequence_reset_sql(self, style, model_list):
# """
# Returns a list of the SQL statements required to reset sequences for
# the given models.
#
# The `style` argument is a Style object as returned by either
# color_style() or no_style() in django.core.management.color.
# """
# from django.db import models
# output = []
# for model in model_list:
# for f in model._meta.local_fields:
# if isinstance(f, models.AutoField):
# output.append(...)
# break # Only one AutoField is allowed per model, so don't bother continuing.
# for f in model._meta.many_to_many:
# output.append(...)
# return output
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN TRANSACTION"
def sql_for_tablespace(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return "ON %s" % self.quote_name(tablespace)
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
# http://msdn2.microsoft.com/en-us/library/ms179859.aspx
return smart_text(x).replace('\\', '\\\\').replace('[', '[[]').replace('%', '[%]').replace('_', '[_]')
def prep_for_iexact_query(self, x):
"""
Same as prep_for_like_query(), but called for "iexact" matches, which
need not necessarily be implemented using "LIKE" in the backend.
"""
return x
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
if self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ:
if timezone.is_aware(value):
# pyodbc donesn't support datetimeoffset
value = value.astimezone(timezone.utc)
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return value
def year_lookup_bounds(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
# SQL Server doesn't support microseconds
last = '%s-12-31 23:59:59'
return [first % value, last % value]
def adapt_decimalfield_value(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
#context.rounding = ROUND_FLOOR
return "%.*f" % (decimal_places + 1, value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return "%.*f" % (decimal_places + 1, value)
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
"""
if value is None:
return None
if field and field.get_internal_type() == 'DateTimeField':
if isinstance(value, string_types) and value:
value = parse_datetime(value)
return value
elif field and field.get_internal_type() == 'DateField':
if isinstance(value, datetime.datetime):
value = value.date() # extract date
elif isinstance(value, string_types):
value = parse_date(value)
elif field and field.get_internal_type() == 'TimeField':
if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
value = value.time() # extract time
elif isinstance(value, string_types):
# If the value is a string, parse it using parse_time.
value = parse_time(value)
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
return value
def return_insert_id(self):
"""
MSSQL implements the RETURNING SQL standard extension differently from
the core database backends and this function is essentially a no-op.
The SQL is altered in the SQLInsertCompiler to add the necessary OUTPUT
clause.
"""
if self.connection._DJANGO_VERSION < 15:
# This gets around inflexibility of SQLInsertCompiler's need to
# append an SQL fragment at the end of the insert query, which also must
# expect the full quoted table and column name.
return ('/* %s */', '')
# Django #19096 - As of Django 1.5, can return None, None to bypass the
# core's SQL mangling.
return (None, None)
|
lionheart/django-pyodbc | django_pyodbc/operations.py | DatabaseOperations.adapt_decimalfield_value | python | def adapt_decimalfield_value(self, value, max_digits, decimal_places):
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
#context.rounding = ROUND_FLOOR
return "%.*f" % (decimal_places + 1, value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return "%.*f" % (decimal_places + 1, value) | Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/operations.py#L470-L483 | null | class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django_pyodbc.compiler"
def __init__(self, connection):
if connection._DJANGO_VERSION >= 14:
super(DatabaseOperations, self).__init__(connection)
else:
super(DatabaseOperations, self).__init__()
self.connection = connection
self._ss_ver = None
self._ss_edition = None
self._is_db2 = None
self._is_openedge = None
self._left_sql_quote = None
self._right_sql_quote = None
@property
def is_db2(self):
if self._is_db2 is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_db2 = options.get('is_db2', False)
return self._is_db2
@property
def is_openedge(self):
if self._is_openedge is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_openedge = options.get('openedge', False)
return self._is_openedge
@property
def left_sql_quote(self):
if self._left_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('left_sql_quote', None)
if q is not None:
self._left_sql_quote = q
elif self.is_db2:
self._left_sql_quote = '{'
elif self.is_openedge:
self._left_sql_quote = '"'
else:
self._left_sql_quote = '['
return self._left_sql_quote
@property
def right_sql_quote(self):
if self._right_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('right_sql_quote', None)
if q is not None:
self._right_sql_quote = q
elif self.is_db2:
self._right_sql_quote = '}'
elif self.is_openedge:
self._right_sql_quote = '"'
else:
self._right_sql_quote = ']'
return self._right_sql_quote
def _get_sql_server_ver(self):
"""
Returns the version of the SQL Server in use:
"""
if self._ss_ver is not None:
return self._ss_ver
cur = self.connection.cursor()
ver_code = None
if not self.is_db2 and not self.is_openedge:
cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
ver_code = cur.fetchone()[0]
ver_code = int(ver_code.split('.')[0])
else:
ver_code = 0
if ver_code >= 11:
self._ss_ver = 2012
elif ver_code == 10:
self._ss_ver = 2008
elif ver_code == 9:
self._ss_ver = 2005
else:
self._ss_ver = 2000
return self._ss_ver
sql_server_ver = property(_get_sql_server_ver)
def _on_azure_sql_db(self):
if self._ss_edition is not None:
return self._ss_edition == EDITION_AZURE_SQL_DB
cur = self.connection.cursor()
cur.execute("SELECT CAST(SERVERPROPERTY('EngineEdition') as integer)")
self._ss_edition = cur.fetchone()[0]
return self._ss_edition == EDITION_AZURE_SQL_DB
on_azure_sql_db = property(_on_azure_sql_db)
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month', 'day' or 'week_day', returns
the SQL that extracts a value from the given date field field_name.
"""
if lookup_type == 'week_day':
return "DATEPART(dw, %s)" % field_name
else:
return "DATEPART(%s, %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
return "DATEADD(%s, DATEDIFF(%s, 0, %s), 0)" % (lookup_type, lookup_type, field_name)
def _switch_tz_offset_sql(self, field_name, tzname):
"""
Returns the SQL that will convert field_name to UTC from tzname.
"""
field_name = self.quote_name(field_name)
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
tz = pytz.timezone(tzname)
td = tz.utcoffset(datetime.datetime(2000, 1, 1))
def total_seconds(td):
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return td.days * 24 * 60 * 60 + td.seconds
total_minutes = total_seconds(td) // 60
hours, minutes = divmod(total_minutes, 60)
tzoffset = "%+03d:%02d" % (hours, minutes)
field_name = "CAST(SWITCHOFFSET(TODATETIMEOFFSET(%s, '+00:00'), '%s') AS DATETIME2)" % (field_name, tzoffset)
return field_name
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
field_name = self._switch_tz_offset_sql(field_name, tzname)
reference_date = '0' # 1900-01-01
if lookup_type in ['minute', 'second']:
# Prevent DATEDIFF overflow by using the first day of the year as
# the reference point. Only using for minute and second to avoid any
# potential performance hit for queries against very large datasets.
reference_date = "CONVERT(datetime2, CONVERT(char(4), {field_name}, 112) + '0101', 112)".format(
field_name=field_name,
)
sql = "DATEADD({lookup}, DATEDIFF({lookup}, {reference_date}, {field_name}), {reference_date})".format(
lookup=lookup_type,
field_name=field_name,
reference_date=reference_date,
)
return sql, []
def field_cast_sql(self, db_type, internal_type=None):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
TODO: verify that db_type and internal_type do not affect T-SQL CAST statement
"""
if self.sql_server_ver < 2005 and db_type and db_type.lower() == 'ntext':
return 'CAST(%s as nvarchar)'
return '%s'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
return 'CONTAINS(%s, %%s)' % field_name
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
# TODO: Check how the `last_insert_id` is being used in the upper layers
# in context of multithreaded access, compare with other backends
# IDENT_CURRENT: http://msdn2.microsoft.com/en-us/library/ms175098.aspx
# SCOPE_IDENTITY: http://msdn2.microsoft.com/en-us/library/ms190315.aspx
# @@IDENTITY: http://msdn2.microsoft.com/en-us/library/ms187342.aspx
# IDENT_CURRENT is not limited by scope and session; it is limited to
# a specified table. IDENT_CURRENT returns the value generated for
# a specific table in any session and any scope.
# SCOPE_IDENTITY and @@IDENTITY return the last identity values that
# are generated in any table in the current session. However,
# SCOPE_IDENTITY returns values inserted only within the current scope;
# @@IDENTITY is not limited to a specific scope.
table_name = self.quote_name(table_name)
cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [table_name])
return cursor.fetchone()[0]
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT/OUTPUT statement
into a table that has an auto-incrementing ID, returns the newly created
ID.
"""
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_name_length(self):
return 128
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
if name.startswith(self.left_sql_quote) and name.endswith(self.right_sql_quote):
return name # Quoting once is enough.
return '%s%s%s' % (self.left_sql_quote, name, self.right_sql_quote)
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return "RAND()"
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params)
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVE TRANSACTION %s" % sid
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "COMMIT TRANSACTION %s" % sid
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TRANSACTION %s" % sid
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
if tables:
# Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY
# So must use the much slower DELETE
from django.db import connections
cursor = connections[self.connection.alias].cursor()
# Try to minimize the risks of the braindeaded inconsistency in
# DBCC CHEKIDENT(table, RESEED, n) behavior.
seqs = []
for seq in sequences:
cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
rowcnt = cursor.fetchone()[0]
elem = {}
if rowcnt:
elem['start_id'] = 0
else:
elem['start_id'] = 1
elem.update(seq)
seqs.append(elem)
cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
fks = cursor.fetchall()
sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks]
sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)) ) for table in tables])
if self.on_azure_sql_db:
import warnings
warnings.warn("The identity columns will never be reset " \
"on Windows Azure SQL Database.",
RuntimeWarning)
else:
# Then reset the counters on each table.
sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
style.SQL_KEYWORD('DBCC'),
style.SQL_KEYWORD('CHECKIDENT'),
style.SQL_FIELD(self.quote_name(seq["table"])),
style.SQL_KEYWORD('RESEED'),
style.SQL_FIELD('%d' % seq['start_id']),
style.SQL_KEYWORD('WITH'),
style.SQL_KEYWORD('NO_INFOMSGS'),
) for seq in seqs])
sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks])
return sql_list
else:
return []
#def sequence_reset_sql(self, style, model_list):
# """
# Returns a list of the SQL statements required to reset sequences for
# the given models.
#
# The `style` argument is a Style object as returned by either
# color_style() or no_style() in django.core.management.color.
# """
# from django.db import models
# output = []
# for model in model_list:
# for f in model._meta.local_fields:
# if isinstance(f, models.AutoField):
# output.append(...)
# break # Only one AutoField is allowed per model, so don't bother continuing.
# for f in model._meta.many_to_many:
# output.append(...)
# return output
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN TRANSACTION"
def sql_for_tablespace(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return "ON %s" % self.quote_name(tablespace)
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
# http://msdn2.microsoft.com/en-us/library/ms179859.aspx
return smart_text(x).replace('\\', '\\\\').replace('[', '[[]').replace('%', '[%]').replace('_', '[_]')
def prep_for_iexact_query(self, x):
"""
Same as prep_for_like_query(), but called for "iexact" matches, which
need not necessarily be implemented using "LIKE" in the backend.
"""
return x
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
if self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ:
if timezone.is_aware(value):
# pyodbc donesn't support datetimeoffset
value = value.astimezone(timezone.utc)
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return value
def adapt_timefield_value(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
# SQL Server doesn't support microseconds
if isinstance(value, string_types):
return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second)
def year_lookup_bounds(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
# SQL Server doesn't support microseconds
last = '%s-12-31 23:59:59'
return [first % value, last % value]
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
"""
if value is None:
return None
if field and field.get_internal_type() == 'DateTimeField':
if isinstance(value, string_types) and value:
value = parse_datetime(value)
return value
elif field and field.get_internal_type() == 'DateField':
if isinstance(value, datetime.datetime):
value = value.date() # extract date
elif isinstance(value, string_types):
value = parse_date(value)
elif field and field.get_internal_type() == 'TimeField':
if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
value = value.time() # extract time
elif isinstance(value, string_types):
# If the value is a string, parse it using parse_time.
value = parse_time(value)
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
return value
def return_insert_id(self):
"""
MSSQL implements the RETURNING SQL standard extension differently from
the core database backends and this function is essentially a no-op.
The SQL is altered in the SQLInsertCompiler to add the necessary OUTPUT
clause.
"""
if self.connection._DJANGO_VERSION < 15:
# This gets around inflexibility of SQLInsertCompiler's need to
# append an SQL fragment at the end of the insert query, which also must
# expect the full quoted table and column name.
return ('/* %s */', '')
# Django #19096 - As of Django 1.5, can return None, None to bypass the
# core's SQL mangling.
return (None, None)
|
lionheart/django-pyodbc | django_pyodbc/operations.py | DatabaseOperations.convert_values | python | def convert_values(self, value, field):
if value is None:
return None
if field and field.get_internal_type() == 'DateTimeField':
if isinstance(value, string_types) and value:
value = parse_datetime(value)
return value
elif field and field.get_internal_type() == 'DateField':
if isinstance(value, datetime.datetime):
value = value.date() # extract date
elif isinstance(value, string_types):
value = parse_date(value)
elif field and field.get_internal_type() == 'TimeField':
if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
value = value.time() # extract time
elif isinstance(value, string_types):
# If the value is a string, parse it using parse_time.
value = parse_time(value)
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
return value | Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008 | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/operations.py#L485-L524 | null | class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django_pyodbc.compiler"
def __init__(self, connection):
if connection._DJANGO_VERSION >= 14:
super(DatabaseOperations, self).__init__(connection)
else:
super(DatabaseOperations, self).__init__()
self.connection = connection
self._ss_ver = None
self._ss_edition = None
self._is_db2 = None
self._is_openedge = None
self._left_sql_quote = None
self._right_sql_quote = None
@property
def is_db2(self):
if self._is_db2 is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_db2 = options.get('is_db2', False)
return self._is_db2
@property
def is_openedge(self):
if self._is_openedge is None:
options = self.connection.settings_dict.get('OPTIONS', {})
self._is_openedge = options.get('openedge', False)
return self._is_openedge
@property
def left_sql_quote(self):
if self._left_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('left_sql_quote', None)
if q is not None:
self._left_sql_quote = q
elif self.is_db2:
self._left_sql_quote = '{'
elif self.is_openedge:
self._left_sql_quote = '"'
else:
self._left_sql_quote = '['
return self._left_sql_quote
@property
def right_sql_quote(self):
if self._right_sql_quote is None:
options = self.connection.settings_dict.get('OPTIONS', {})
q = options.get('right_sql_quote', None)
if q is not None:
self._right_sql_quote = q
elif self.is_db2:
self._right_sql_quote = '}'
elif self.is_openedge:
self._right_sql_quote = '"'
else:
self._right_sql_quote = ']'
return self._right_sql_quote
def _get_sql_server_ver(self):
"""
Returns the version of the SQL Server in use:
"""
if self._ss_ver is not None:
return self._ss_ver
cur = self.connection.cursor()
ver_code = None
if not self.is_db2 and not self.is_openedge:
cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
ver_code = cur.fetchone()[0]
ver_code = int(ver_code.split('.')[0])
else:
ver_code = 0
if ver_code >= 11:
self._ss_ver = 2012
elif ver_code == 10:
self._ss_ver = 2008
elif ver_code == 9:
self._ss_ver = 2005
else:
self._ss_ver = 2000
return self._ss_ver
sql_server_ver = property(_get_sql_server_ver)
def _on_azure_sql_db(self):
if self._ss_edition is not None:
return self._ss_edition == EDITION_AZURE_SQL_DB
cur = self.connection.cursor()
cur.execute("SELECT CAST(SERVERPROPERTY('EngineEdition') as integer)")
self._ss_edition = cur.fetchone()[0]
return self._ss_edition == EDITION_AZURE_SQL_DB
on_azure_sql_db = property(_on_azure_sql_db)
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month', 'day' or 'week_day', returns
the SQL that extracts a value from the given date field field_name.
"""
if lookup_type == 'week_day':
return "DATEPART(dw, %s)" % field_name
else:
return "DATEPART(%s, %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
return "DATEADD(%s, DATEDIFF(%s, 0, %s), 0)" % (lookup_type, lookup_type, field_name)
def _switch_tz_offset_sql(self, field_name, tzname):
"""
Returns the SQL that will convert field_name to UTC from tzname.
"""
field_name = self.quote_name(field_name)
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
tz = pytz.timezone(tzname)
td = tz.utcoffset(datetime.datetime(2000, 1, 1))
def total_seconds(td):
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return td.days * 24 * 60 * 60 + td.seconds
total_minutes = total_seconds(td) // 60
hours, minutes = divmod(total_minutes, 60)
tzoffset = "%+03d:%02d" % (hours, minutes)
field_name = "CAST(SWITCHOFFSET(TODATETIMEOFFSET(%s, '+00:00'), '%s') AS DATETIME2)" % (field_name, tzoffset)
return field_name
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
field_name = self._switch_tz_offset_sql(field_name, tzname)
reference_date = '0' # 1900-01-01
if lookup_type in ['minute', 'second']:
# Prevent DATEDIFF overflow by using the first day of the year as
# the reference point. Only using for minute and second to avoid any
# potential performance hit for queries against very large datasets.
reference_date = "CONVERT(datetime2, CONVERT(char(4), {field_name}, 112) + '0101', 112)".format(
field_name=field_name,
)
sql = "DATEADD({lookup}, DATEDIFF({lookup}, {reference_date}, {field_name}), {reference_date})".format(
lookup=lookup_type,
field_name=field_name,
reference_date=reference_date,
)
return sql, []
def field_cast_sql(self, db_type, internal_type=None):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
TODO: verify that db_type and internal_type do not affect T-SQL CAST statement
"""
if self.sql_server_ver < 2005 and db_type and db_type.lower() == 'ntext':
return 'CAST(%s as nvarchar)'
return '%s'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
return 'CONTAINS(%s, %%s)' % field_name
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
# TODO: Check how the `last_insert_id` is being used in the upper layers
# in context of multithreaded access, compare with other backends
# IDENT_CURRENT: http://msdn2.microsoft.com/en-us/library/ms175098.aspx
# SCOPE_IDENTITY: http://msdn2.microsoft.com/en-us/library/ms190315.aspx
# @@IDENTITY: http://msdn2.microsoft.com/en-us/library/ms187342.aspx
# IDENT_CURRENT is not limited by scope and session; it is limited to
# a specified table. IDENT_CURRENT returns the value generated for
# a specific table in any session and any scope.
# SCOPE_IDENTITY and @@IDENTITY return the last identity values that
# are generated in any table in the current session. However,
# SCOPE_IDENTITY returns values inserted only within the current scope;
# @@IDENTITY is not limited to a specific scope.
table_name = self.quote_name(table_name)
cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [table_name])
return cursor.fetchone()[0]
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT/OUTPUT statement
into a table that has an auto-incrementing ID, returns the newly created
ID.
"""
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_name_length(self):
return 128
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
if name.startswith(self.left_sql_quote) and name.endswith(self.right_sql_quote):
return name # Quoting once is enough.
return '%s%s%s' % (self.left_sql_quote, name, self.right_sql_quote)
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return "RAND()"
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params)
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVE TRANSACTION %s" % sid
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "COMMIT TRANSACTION %s" % sid
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TRANSACTION %s" % sid
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
if tables:
# Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY
# So must use the much slower DELETE
from django.db import connections
cursor = connections[self.connection.alias].cursor()
# Try to minimize the risks of the braindeaded inconsistency in
# DBCC CHEKIDENT(table, RESEED, n) behavior.
seqs = []
for seq in sequences:
cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
rowcnt = cursor.fetchone()[0]
elem = {}
if rowcnt:
elem['start_id'] = 0
else:
elem['start_id'] = 1
elem.update(seq)
seqs.append(elem)
cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
fks = cursor.fetchall()
sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks]
sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)) ) for table in tables])
if self.on_azure_sql_db:
import warnings
warnings.warn("The identity columns will never be reset " \
"on Windows Azure SQL Database.",
RuntimeWarning)
else:
# Then reset the counters on each table.
sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
style.SQL_KEYWORD('DBCC'),
style.SQL_KEYWORD('CHECKIDENT'),
style.SQL_FIELD(self.quote_name(seq["table"])),
style.SQL_KEYWORD('RESEED'),
style.SQL_FIELD('%d' % seq['start_id']),
style.SQL_KEYWORD('WITH'),
style.SQL_KEYWORD('NO_INFOMSGS'),
) for seq in seqs])
sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks])
return sql_list
else:
return []
#def sequence_reset_sql(self, style, model_list):
# """
# Returns a list of the SQL statements required to reset sequences for
# the given models.
#
# The `style` argument is a Style object as returned by either
# color_style() or no_style() in django.core.management.color.
# """
# from django.db import models
# output = []
# for model in model_list:
# for f in model._meta.local_fields:
# if isinstance(f, models.AutoField):
# output.append(...)
# break # Only one AutoField is allowed per model, so don't bother continuing.
# for f in model._meta.many_to_many:
# output.append(...)
# return output
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN TRANSACTION"
def sql_for_tablespace(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return "ON %s" % self.quote_name(tablespace)
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
# http://msdn2.microsoft.com/en-us/library/ms179859.aspx
return smart_text(x).replace('\\', '\\\\').replace('[', '[[]').replace('%', '[%]').replace('_', '[_]')
def prep_for_iexact_query(self, x):
"""
Same as prep_for_like_query(), but called for "iexact" matches, which
need not necessarily be implemented using "LIKE" in the backend.
"""
return x
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
if self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ:
if timezone.is_aware(value):
# pyodbc donesn't support datetimeoffset
value = value.astimezone(timezone.utc)
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return value
def adapt_timefield_value(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
# SQL Server doesn't support microseconds
if isinstance(value, string_types):
return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second)
def year_lookup_bounds(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
# SQL Server doesn't support microseconds
last = '%s-12-31 23:59:59'
return [first % value, last % value]
def adapt_decimalfield_value(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
#context.rounding = ROUND_FLOOR
return "%.*f" % (decimal_places + 1, value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return "%.*f" % (decimal_places + 1, value)
def return_insert_id(self):
"""
MSSQL implements the RETURNING SQL standard extension differently from
the core database backends and this function is essentially a no-op.
The SQL is altered in the SQLInsertCompiler to add the necessary OUTPUT
clause.
"""
if self.connection._DJANGO_VERSION < 15:
# This gets around inflexibility of SQLInsertCompiler's need to
# append an SQL fragment at the end of the insert query, which also must
# expect the full quoted table and column name.
return ('/* %s */', '')
# Django #19096 - As of Django 1.5, can return None, None to bypass the
# core's SQL mangling.
return (None, None)
|
lionheart/django-pyodbc | django_pyodbc/introspection.py | DatabaseIntrospection.get_table_list | python | def get_table_list(self, cursor):
# TABLES: http://msdn2.microsoft.com/en-us/library/ms186224.aspx
# TODO: Believe the below queries should actually select `TABLE_NAME, TABLE_TYPE`
if cursor.db_wrpr.limit_table_list:
cursor.execute("SELECT TABLE_NAME, 't' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE' AND TABLE_SCHEMA = 'dbo'")
else:
cursor.execute("SELECT TABLE_NAME, 't' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE'")
return [row_to_table_info(row) for row in cursor.fetchall()] | Returns a list of table names in the current database. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/introspection.py#L86-L97 | null | class DatabaseIntrospection(BaseDatabaseIntrospection):
# Map type codes to Django Field types.
data_types_reverse = {
SQL_AUTOFIELD: 'IntegerField',
Database.SQL_BIGINT: 'BigIntegerField',
Database.SQL_BINARY: 'BinaryField',
Database.SQL_BIT: 'NullBooleanField',
Database.SQL_CHAR: 'CharField',
Database.SQL_DECIMAL: 'DecimalField',
Database.SQL_DOUBLE: 'FloatField',
Database.SQL_FLOAT: 'FloatField',
Database.SQL_GUID: 'TextField',
Database.SQL_INTEGER: 'IntegerField',
Database.SQL_LONGVARBINARY: 'BinaryField',
#Database.SQL_LONGVARCHAR: ,
Database.SQL_NUMERIC: 'DecimalField',
Database.SQL_REAL: 'FloatField',
Database.SQL_SMALLINT: 'SmallIntegerField',
Database.SQL_TINYINT: 'SmallIntegerField',
Database.SQL_TYPE_DATE: 'DateField',
Database.SQL_TYPE_TIME: 'TimeField',
Database.SQL_TYPE_TIMESTAMP: 'DateTimeField',
Database.SQL_VARBINARY: 'BinaryField',
Database.SQL_VARCHAR: 'TextField',
Database.SQL_WCHAR: 'CharField',
Database.SQL_WLONGVARCHAR: 'TextField',
Database.SQL_WVARCHAR: 'TextField',
}
def _is_auto_field(self, cursor, table_name, column_name):
"""
Checks whether column is Identity
"""
# COLUMNPROPERTY: http://msdn2.microsoft.com/en-us/library/ms174968.aspx
#from django.db import connection
#cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
# (connection.ops.quote_name(table_name), column_name))
cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
(self.connection.ops.quote_name(table_name), column_name))
return cursor.fetchall()[0][0]
def get_table_description(self, cursor, table_name, identity_check=True):
"""Returns a description of the table, with DB-API cursor.description interface.
The 'auto_check' parameter has been added to the function argspec.
If set to True, the function will check each of the table's fields for the
IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
When a field is found with an IDENTITY property, it is given a custom field number
of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict.
"""
# map pyodbc's cursor.columns to db-api cursor description
columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.columns(table=table_name)]
items = []
for column in columns:
if identity_check and self._is_auto_field(cursor, table_name, column[0]):
column[1] = SQL_AUTOFIELD
# The conversion from TextField to CharField below is unwise.
# A SQLServer db field of type "Text" is not interchangeable with a CharField, no matter how short its max_length.
# For example, model.objects.values(<text_field_name>).count() will fail on a sqlserver 'text' field
if column[1] == Database.SQL_WVARCHAR and column[3] < 4000:
column[1] = Database.SQL_WCHAR
items.append(column)
return items
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, identity_check=False))])
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
# CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
# CONSTRAINT_TABLE_USAGE: http://msdn2.microsoft.com/en-us/library/ms179883.aspx
# REFERENTIAL_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms179987.aspx
# TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
table_index = self._name_to_index(cursor, table_name)
sql = """
SELECT e.COLUMN_NAME AS column_name,
c.TABLE_NAME AS referenced_table_name,
d.COLUMN_NAME AS referenced_column_name
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS b
ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_TABLE_USAGE AS c
ON b.UNIQUE_CONSTRAINT_NAME = c.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS d
ON c.CONSTRAINT_NAME = d.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS e
ON a.CONSTRAINT_NAME = e.CONSTRAINT_NAME
WHERE a.TABLE_NAME = %s AND a.CONSTRAINT_TYPE = 'FOREIGN KEY'"""
cursor.execute(sql, (table_name,))
return dict([(table_index[item[0]], (self._name_to_index(cursor, item[1])[item[2]], item[1]))
for item in cursor.fetchall()])
def get_indexes(self, cursor, table_name):
# Returns a dictionary of fieldname -> infodict for the given table,
# where each infodict is in the format:
# {'primary_key': boolean representing whether it's the primary key,
# 'unique': boolean representing whether it's a unique index}
sql = """
select
C.name as [column_name],
IX.is_unique as [unique],
IX.is_primary_key as [primary_key]
from
sys.tables T
join sys.index_columns IC on IC.object_id = T.object_id
join sys.columns C on C.object_id = T.object_id and C.column_id = IC.column_id
join sys.indexes IX on IX.object_id = T.object_id and IX.index_id = IC.index_id
where
T.name = %s
-- Omit multi-column keys
and not exists (
select *
from sys.index_columns cols
where
cols.object_id = T.object_id
and cols.index_id = IC.index_id
and cols.key_ordinal > 1
)
"""
cursor.execute(sql,[table_name])
constraints = cursor.fetchall()
indexes = dict()
for column_name, unique, primary_key in constraints:
indexes[column_name.lower()] = {"primary_key":primary_key, "unique":unique}
return indexes
#def get_collations_list(self, cursor):
# """
# Returns list of available collations and theirs descriptions.
# """
# # http://msdn2.microsoft.com/en-us/library/ms184391.aspx
# # http://msdn2.microsoft.com/en-us/library/ms179886.aspx
#
# cursor.execute("SELECT name, description FROM ::fn_helpcollations()")
# return [tuple(row) for row in cursor.fetchall()]
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
source_field_dict = self._name_to_index(cursor, table_name)
sql = """
select
COLUMN_NAME = fk_cols.COLUMN_NAME,
REFERENCED_TABLE_NAME = pk.TABLE_NAME,
REFERENCED_COLUMN_NAME = pk_cols.COLUMN_NAME
from INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS ref_const
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS fk
on ref_const.CONSTRAINT_CATALOG = fk.CONSTRAINT_CATALOG
and ref_const.CONSTRAINT_SCHEMA = fk.CONSTRAINT_SCHEMA
and ref_const.CONSTRAINT_NAME = fk.CONSTRAINT_NAME
and fk.CONSTRAINT_TYPE = 'FOREIGN KEY'
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS pk
on ref_const.UNIQUE_CONSTRAINT_CATALOG = pk.CONSTRAINT_CATALOG
and ref_const.UNIQUE_CONSTRAINT_SCHEMA = pk.CONSTRAINT_SCHEMA
and ref_const.UNIQUE_CONSTRAINT_NAME = pk.CONSTRAINT_NAME
And pk.CONSTRAINT_TYPE = 'PRIMARY KEY'
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE fk_cols
on ref_const.CONSTRAINT_NAME = fk_cols.CONSTRAINT_NAME
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE pk_cols
on pk.CONSTRAINT_NAME = pk_cols.CONSTRAINT_NAME
where
fk.TABLE_NAME = %s"""
cursor.execute(sql,[table_name])
relations = cursor.fetchall()
key_columns = []
key_columns.extend([(source_column, target_table, target_column) \
for source_column, target_table, target_column in relations])
return key_columns
|
lionheart/django-pyodbc | django_pyodbc/introspection.py | DatabaseIntrospection._is_auto_field | python | def _is_auto_field(self, cursor, table_name, column_name):
# COLUMNPROPERTY: http://msdn2.microsoft.com/en-us/library/ms174968.aspx
#from django.db import connection
#cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
# (connection.ops.quote_name(table_name), column_name))
cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
(self.connection.ops.quote_name(table_name), column_name))
return cursor.fetchall()[0][0] | Checks whether column is Identity | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/introspection.py#L99-L110 | null | class DatabaseIntrospection(BaseDatabaseIntrospection):
# Map type codes to Django Field types.
data_types_reverse = {
SQL_AUTOFIELD: 'IntegerField',
Database.SQL_BIGINT: 'BigIntegerField',
Database.SQL_BINARY: 'BinaryField',
Database.SQL_BIT: 'NullBooleanField',
Database.SQL_CHAR: 'CharField',
Database.SQL_DECIMAL: 'DecimalField',
Database.SQL_DOUBLE: 'FloatField',
Database.SQL_FLOAT: 'FloatField',
Database.SQL_GUID: 'TextField',
Database.SQL_INTEGER: 'IntegerField',
Database.SQL_LONGVARBINARY: 'BinaryField',
#Database.SQL_LONGVARCHAR: ,
Database.SQL_NUMERIC: 'DecimalField',
Database.SQL_REAL: 'FloatField',
Database.SQL_SMALLINT: 'SmallIntegerField',
Database.SQL_TINYINT: 'SmallIntegerField',
Database.SQL_TYPE_DATE: 'DateField',
Database.SQL_TYPE_TIME: 'TimeField',
Database.SQL_TYPE_TIMESTAMP: 'DateTimeField',
Database.SQL_VARBINARY: 'BinaryField',
Database.SQL_VARCHAR: 'TextField',
Database.SQL_WCHAR: 'CharField',
Database.SQL_WLONGVARCHAR: 'TextField',
Database.SQL_WVARCHAR: 'TextField',
}
def get_table_list(self, cursor):
"""
Returns a list of table names in the current database.
"""
# TABLES: http://msdn2.microsoft.com/en-us/library/ms186224.aspx
# TODO: Believe the below queries should actually select `TABLE_NAME, TABLE_TYPE`
if cursor.db_wrpr.limit_table_list:
cursor.execute("SELECT TABLE_NAME, 't' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE' AND TABLE_SCHEMA = 'dbo'")
else:
cursor.execute("SELECT TABLE_NAME, 't' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE'")
return [row_to_table_info(row) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name, identity_check=True):
"""Returns a description of the table, with DB-API cursor.description interface.
The 'auto_check' parameter has been added to the function argspec.
If set to True, the function will check each of the table's fields for the
IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
When a field is found with an IDENTITY property, it is given a custom field number
of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict.
"""
# map pyodbc's cursor.columns to db-api cursor description
columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.columns(table=table_name)]
items = []
for column in columns:
if identity_check and self._is_auto_field(cursor, table_name, column[0]):
column[1] = SQL_AUTOFIELD
# The conversion from TextField to CharField below is unwise.
# A SQLServer db field of type "Text" is not interchangeable with a CharField, no matter how short its max_length.
# For example, model.objects.values(<text_field_name>).count() will fail on a sqlserver 'text' field
if column[1] == Database.SQL_WVARCHAR and column[3] < 4000:
column[1] = Database.SQL_WCHAR
items.append(column)
return items
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, identity_check=False))])
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
# CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
# CONSTRAINT_TABLE_USAGE: http://msdn2.microsoft.com/en-us/library/ms179883.aspx
# REFERENTIAL_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms179987.aspx
# TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
table_index = self._name_to_index(cursor, table_name)
sql = """
SELECT e.COLUMN_NAME AS column_name,
c.TABLE_NAME AS referenced_table_name,
d.COLUMN_NAME AS referenced_column_name
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS b
ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_TABLE_USAGE AS c
ON b.UNIQUE_CONSTRAINT_NAME = c.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS d
ON c.CONSTRAINT_NAME = d.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS e
ON a.CONSTRAINT_NAME = e.CONSTRAINT_NAME
WHERE a.TABLE_NAME = %s AND a.CONSTRAINT_TYPE = 'FOREIGN KEY'"""
cursor.execute(sql, (table_name,))
return dict([(table_index[item[0]], (self._name_to_index(cursor, item[1])[item[2]], item[1]))
for item in cursor.fetchall()])
def get_indexes(self, cursor, table_name):
# Returns a dictionary of fieldname -> infodict for the given table,
# where each infodict is in the format:
# {'primary_key': boolean representing whether it's the primary key,
# 'unique': boolean representing whether it's a unique index}
sql = """
select
C.name as [column_name],
IX.is_unique as [unique],
IX.is_primary_key as [primary_key]
from
sys.tables T
join sys.index_columns IC on IC.object_id = T.object_id
join sys.columns C on C.object_id = T.object_id and C.column_id = IC.column_id
join sys.indexes IX on IX.object_id = T.object_id and IX.index_id = IC.index_id
where
T.name = %s
-- Omit multi-column keys
and not exists (
select *
from sys.index_columns cols
where
cols.object_id = T.object_id
and cols.index_id = IC.index_id
and cols.key_ordinal > 1
)
"""
cursor.execute(sql,[table_name])
constraints = cursor.fetchall()
indexes = dict()
for column_name, unique, primary_key in constraints:
indexes[column_name.lower()] = {"primary_key":primary_key, "unique":unique}
return indexes
#def get_collations_list(self, cursor):
# """
# Returns list of available collations and theirs descriptions.
# """
# # http://msdn2.microsoft.com/en-us/library/ms184391.aspx
# # http://msdn2.microsoft.com/en-us/library/ms179886.aspx
#
# cursor.execute("SELECT name, description FROM ::fn_helpcollations()")
# return [tuple(row) for row in cursor.fetchall()]
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
source_field_dict = self._name_to_index(cursor, table_name)
sql = """
select
COLUMN_NAME = fk_cols.COLUMN_NAME,
REFERENCED_TABLE_NAME = pk.TABLE_NAME,
REFERENCED_COLUMN_NAME = pk_cols.COLUMN_NAME
from INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS ref_const
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS fk
on ref_const.CONSTRAINT_CATALOG = fk.CONSTRAINT_CATALOG
and ref_const.CONSTRAINT_SCHEMA = fk.CONSTRAINT_SCHEMA
and ref_const.CONSTRAINT_NAME = fk.CONSTRAINT_NAME
and fk.CONSTRAINT_TYPE = 'FOREIGN KEY'
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS pk
on ref_const.UNIQUE_CONSTRAINT_CATALOG = pk.CONSTRAINT_CATALOG
and ref_const.UNIQUE_CONSTRAINT_SCHEMA = pk.CONSTRAINT_SCHEMA
and ref_const.UNIQUE_CONSTRAINT_NAME = pk.CONSTRAINT_NAME
And pk.CONSTRAINT_TYPE = 'PRIMARY KEY'
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE fk_cols
on ref_const.CONSTRAINT_NAME = fk_cols.CONSTRAINT_NAME
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE pk_cols
on pk.CONSTRAINT_NAME = pk_cols.CONSTRAINT_NAME
where
fk.TABLE_NAME = %s"""
cursor.execute(sql,[table_name])
relations = cursor.fetchall()
key_columns = []
key_columns.extend([(source_column, target_table, target_column) \
for source_column, target_table, target_column in relations])
return key_columns
|
lionheart/django-pyodbc | django_pyodbc/introspection.py | DatabaseIntrospection.get_table_description | python | def get_table_description(self, cursor, table_name, identity_check=True):
# map pyodbc's cursor.columns to db-api cursor description
columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.columns(table=table_name)]
items = []
for column in columns:
if identity_check and self._is_auto_field(cursor, table_name, column[0]):
column[1] = SQL_AUTOFIELD
# The conversion from TextField to CharField below is unwise.
# A SQLServer db field of type "Text" is not interchangeable with a CharField, no matter how short its max_length.
# For example, model.objects.values(<text_field_name>).count() will fail on a sqlserver 'text' field
if column[1] == Database.SQL_WVARCHAR and column[3] < 4000:
column[1] = Database.SQL_WCHAR
items.append(column)
return items | Returns a description of the table, with DB-API cursor.description interface.
The 'auto_check' parameter has been added to the function argspec.
If set to True, the function will check each of the table's fields for the
IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
When a field is found with an IDENTITY property, it is given a custom field number
of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/introspection.py#L114-L137 | null | class DatabaseIntrospection(BaseDatabaseIntrospection):
# Map type codes to Django Field types.
data_types_reverse = {
SQL_AUTOFIELD: 'IntegerField',
Database.SQL_BIGINT: 'BigIntegerField',
Database.SQL_BINARY: 'BinaryField',
Database.SQL_BIT: 'NullBooleanField',
Database.SQL_CHAR: 'CharField',
Database.SQL_DECIMAL: 'DecimalField',
Database.SQL_DOUBLE: 'FloatField',
Database.SQL_FLOAT: 'FloatField',
Database.SQL_GUID: 'TextField',
Database.SQL_INTEGER: 'IntegerField',
Database.SQL_LONGVARBINARY: 'BinaryField',
#Database.SQL_LONGVARCHAR: ,
Database.SQL_NUMERIC: 'DecimalField',
Database.SQL_REAL: 'FloatField',
Database.SQL_SMALLINT: 'SmallIntegerField',
Database.SQL_TINYINT: 'SmallIntegerField',
Database.SQL_TYPE_DATE: 'DateField',
Database.SQL_TYPE_TIME: 'TimeField',
Database.SQL_TYPE_TIMESTAMP: 'DateTimeField',
Database.SQL_VARBINARY: 'BinaryField',
Database.SQL_VARCHAR: 'TextField',
Database.SQL_WCHAR: 'CharField',
Database.SQL_WLONGVARCHAR: 'TextField',
Database.SQL_WVARCHAR: 'TextField',
}
def get_table_list(self, cursor):
"""
Returns a list of table names in the current database.
"""
# TABLES: http://msdn2.microsoft.com/en-us/library/ms186224.aspx
# TODO: Believe the below queries should actually select `TABLE_NAME, TABLE_TYPE`
if cursor.db_wrpr.limit_table_list:
cursor.execute("SELECT TABLE_NAME, 't' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE' AND TABLE_SCHEMA = 'dbo'")
else:
cursor.execute("SELECT TABLE_NAME, 't' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE'")
return [row_to_table_info(row) for row in cursor.fetchall()]
def _is_auto_field(self, cursor, table_name, column_name):
"""
Checks whether column is Identity
"""
# COLUMNPROPERTY: http://msdn2.microsoft.com/en-us/library/ms174968.aspx
#from django.db import connection
#cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
# (connection.ops.quote_name(table_name), column_name))
cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
(self.connection.ops.quote_name(table_name), column_name))
return cursor.fetchall()[0][0]
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, identity_check=False))])
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
# CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
# CONSTRAINT_TABLE_USAGE: http://msdn2.microsoft.com/en-us/library/ms179883.aspx
# REFERENTIAL_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms179987.aspx
# TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
table_index = self._name_to_index(cursor, table_name)
sql = """
SELECT e.COLUMN_NAME AS column_name,
c.TABLE_NAME AS referenced_table_name,
d.COLUMN_NAME AS referenced_column_name
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS b
ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_TABLE_USAGE AS c
ON b.UNIQUE_CONSTRAINT_NAME = c.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS d
ON c.CONSTRAINT_NAME = d.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS e
ON a.CONSTRAINT_NAME = e.CONSTRAINT_NAME
WHERE a.TABLE_NAME = %s AND a.CONSTRAINT_TYPE = 'FOREIGN KEY'"""
cursor.execute(sql, (table_name,))
return dict([(table_index[item[0]], (self._name_to_index(cursor, item[1])[item[2]], item[1]))
for item in cursor.fetchall()])
def get_indexes(self, cursor, table_name):
# Returns a dictionary of fieldname -> infodict for the given table,
# where each infodict is in the format:
# {'primary_key': boolean representing whether it's the primary key,
# 'unique': boolean representing whether it's a unique index}
sql = """
select
C.name as [column_name],
IX.is_unique as [unique],
IX.is_primary_key as [primary_key]
from
sys.tables T
join sys.index_columns IC on IC.object_id = T.object_id
join sys.columns C on C.object_id = T.object_id and C.column_id = IC.column_id
join sys.indexes IX on IX.object_id = T.object_id and IX.index_id = IC.index_id
where
T.name = %s
-- Omit multi-column keys
and not exists (
select *
from sys.index_columns cols
where
cols.object_id = T.object_id
and cols.index_id = IC.index_id
and cols.key_ordinal > 1
)
"""
cursor.execute(sql,[table_name])
constraints = cursor.fetchall()
indexes = dict()
for column_name, unique, primary_key in constraints:
indexes[column_name.lower()] = {"primary_key":primary_key, "unique":unique}
return indexes
#def get_collations_list(self, cursor):
# """
# Returns list of available collations and theirs descriptions.
# """
# # http://msdn2.microsoft.com/en-us/library/ms184391.aspx
# # http://msdn2.microsoft.com/en-us/library/ms179886.aspx
#
# cursor.execute("SELECT name, description FROM ::fn_helpcollations()")
# return [tuple(row) for row in cursor.fetchall()]
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
source_field_dict = self._name_to_index(cursor, table_name)
sql = """
select
COLUMN_NAME = fk_cols.COLUMN_NAME,
REFERENCED_TABLE_NAME = pk.TABLE_NAME,
REFERENCED_COLUMN_NAME = pk_cols.COLUMN_NAME
from INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS ref_const
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS fk
on ref_const.CONSTRAINT_CATALOG = fk.CONSTRAINT_CATALOG
and ref_const.CONSTRAINT_SCHEMA = fk.CONSTRAINT_SCHEMA
and ref_const.CONSTRAINT_NAME = fk.CONSTRAINT_NAME
and fk.CONSTRAINT_TYPE = 'FOREIGN KEY'
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS pk
on ref_const.UNIQUE_CONSTRAINT_CATALOG = pk.CONSTRAINT_CATALOG
and ref_const.UNIQUE_CONSTRAINT_SCHEMA = pk.CONSTRAINT_SCHEMA
and ref_const.UNIQUE_CONSTRAINT_NAME = pk.CONSTRAINT_NAME
And pk.CONSTRAINT_TYPE = 'PRIMARY KEY'
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE fk_cols
on ref_const.CONSTRAINT_NAME = fk_cols.CONSTRAINT_NAME
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE pk_cols
on pk.CONSTRAINT_NAME = pk_cols.CONSTRAINT_NAME
where
fk.TABLE_NAME = %s"""
cursor.execute(sql,[table_name])
relations = cursor.fetchall()
key_columns = []
key_columns.extend([(source_column, target_table, target_column) \
for source_column, target_table, target_column in relations])
return key_columns
|
lionheart/django-pyodbc | django_pyodbc/introspection.py | DatabaseIntrospection._name_to_index | python | def _name_to_index(self, cursor, table_name):
return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, identity_check=False))]) | Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/introspection.py#L139-L144 | null | class DatabaseIntrospection(BaseDatabaseIntrospection):
# Map type codes to Django Field types.
data_types_reverse = {
SQL_AUTOFIELD: 'IntegerField',
Database.SQL_BIGINT: 'BigIntegerField',
Database.SQL_BINARY: 'BinaryField',
Database.SQL_BIT: 'NullBooleanField',
Database.SQL_CHAR: 'CharField',
Database.SQL_DECIMAL: 'DecimalField',
Database.SQL_DOUBLE: 'FloatField',
Database.SQL_FLOAT: 'FloatField',
Database.SQL_GUID: 'TextField',
Database.SQL_INTEGER: 'IntegerField',
Database.SQL_LONGVARBINARY: 'BinaryField',
#Database.SQL_LONGVARCHAR: ,
Database.SQL_NUMERIC: 'DecimalField',
Database.SQL_REAL: 'FloatField',
Database.SQL_SMALLINT: 'SmallIntegerField',
Database.SQL_TINYINT: 'SmallIntegerField',
Database.SQL_TYPE_DATE: 'DateField',
Database.SQL_TYPE_TIME: 'TimeField',
Database.SQL_TYPE_TIMESTAMP: 'DateTimeField',
Database.SQL_VARBINARY: 'BinaryField',
Database.SQL_VARCHAR: 'TextField',
Database.SQL_WCHAR: 'CharField',
Database.SQL_WLONGVARCHAR: 'TextField',
Database.SQL_WVARCHAR: 'TextField',
}
def get_table_list(self, cursor):
"""
Returns a list of table names in the current database.
"""
# TABLES: http://msdn2.microsoft.com/en-us/library/ms186224.aspx
# TODO: Believe the below queries should actually select `TABLE_NAME, TABLE_TYPE`
if cursor.db_wrpr.limit_table_list:
cursor.execute("SELECT TABLE_NAME, 't' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE' AND TABLE_SCHEMA = 'dbo'")
else:
cursor.execute("SELECT TABLE_NAME, 't' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE'")
return [row_to_table_info(row) for row in cursor.fetchall()]
def _is_auto_field(self, cursor, table_name, column_name):
"""
Checks whether column is Identity
"""
# COLUMNPROPERTY: http://msdn2.microsoft.com/en-us/library/ms174968.aspx
#from django.db import connection
#cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
# (connection.ops.quote_name(table_name), column_name))
cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
(self.connection.ops.quote_name(table_name), column_name))
return cursor.fetchall()[0][0]
def get_table_description(self, cursor, table_name, identity_check=True):
"""Returns a description of the table, with DB-API cursor.description interface.
The 'auto_check' parameter has been added to the function argspec.
If set to True, the function will check each of the table's fields for the
IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
When a field is found with an IDENTITY property, it is given a custom field number
of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict.
"""
# map pyodbc's cursor.columns to db-api cursor description
columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.columns(table=table_name)]
items = []
for column in columns:
if identity_check and self._is_auto_field(cursor, table_name, column[0]):
column[1] = SQL_AUTOFIELD
# The conversion from TextField to CharField below is unwise.
# A SQLServer db field of type "Text" is not interchangeable with a CharField, no matter how short its max_length.
# For example, model.objects.values(<text_field_name>).count() will fail on a sqlserver 'text' field
if column[1] == Database.SQL_WVARCHAR and column[3] < 4000:
column[1] = Database.SQL_WCHAR
items.append(column)
return items
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
# CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
# CONSTRAINT_TABLE_USAGE: http://msdn2.microsoft.com/en-us/library/ms179883.aspx
# REFERENTIAL_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms179987.aspx
# TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
table_index = self._name_to_index(cursor, table_name)
sql = """
SELECT e.COLUMN_NAME AS column_name,
c.TABLE_NAME AS referenced_table_name,
d.COLUMN_NAME AS referenced_column_name
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS b
ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_TABLE_USAGE AS c
ON b.UNIQUE_CONSTRAINT_NAME = c.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS d
ON c.CONSTRAINT_NAME = d.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS e
ON a.CONSTRAINT_NAME = e.CONSTRAINT_NAME
WHERE a.TABLE_NAME = %s AND a.CONSTRAINT_TYPE = 'FOREIGN KEY'"""
cursor.execute(sql, (table_name,))
return dict([(table_index[item[0]], (self._name_to_index(cursor, item[1])[item[2]], item[1]))
for item in cursor.fetchall()])
def get_indexes(self, cursor, table_name):
# Returns a dictionary of fieldname -> infodict for the given table,
# where each infodict is in the format:
# {'primary_key': boolean representing whether it's the primary key,
# 'unique': boolean representing whether it's a unique index}
sql = """
select
C.name as [column_name],
IX.is_unique as [unique],
IX.is_primary_key as [primary_key]
from
sys.tables T
join sys.index_columns IC on IC.object_id = T.object_id
join sys.columns C on C.object_id = T.object_id and C.column_id = IC.column_id
join sys.indexes IX on IX.object_id = T.object_id and IX.index_id = IC.index_id
where
T.name = %s
-- Omit multi-column keys
and not exists (
select *
from sys.index_columns cols
where
cols.object_id = T.object_id
and cols.index_id = IC.index_id
and cols.key_ordinal > 1
)
"""
cursor.execute(sql,[table_name])
constraints = cursor.fetchall()
indexes = dict()
for column_name, unique, primary_key in constraints:
indexes[column_name.lower()] = {"primary_key":primary_key, "unique":unique}
return indexes
#def get_collations_list(self, cursor):
# """
# Returns list of available collations and theirs descriptions.
# """
# # http://msdn2.microsoft.com/en-us/library/ms184391.aspx
# # http://msdn2.microsoft.com/en-us/library/ms179886.aspx
#
# cursor.execute("SELECT name, description FROM ::fn_helpcollations()")
# return [tuple(row) for row in cursor.fetchall()]
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
source_field_dict = self._name_to_index(cursor, table_name)
sql = """
select
COLUMN_NAME = fk_cols.COLUMN_NAME,
REFERENCED_TABLE_NAME = pk.TABLE_NAME,
REFERENCED_COLUMN_NAME = pk_cols.COLUMN_NAME
from INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS ref_const
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS fk
on ref_const.CONSTRAINT_CATALOG = fk.CONSTRAINT_CATALOG
and ref_const.CONSTRAINT_SCHEMA = fk.CONSTRAINT_SCHEMA
and ref_const.CONSTRAINT_NAME = fk.CONSTRAINT_NAME
and fk.CONSTRAINT_TYPE = 'FOREIGN KEY'
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS pk
on ref_const.UNIQUE_CONSTRAINT_CATALOG = pk.CONSTRAINT_CATALOG
and ref_const.UNIQUE_CONSTRAINT_SCHEMA = pk.CONSTRAINT_SCHEMA
and ref_const.UNIQUE_CONSTRAINT_NAME = pk.CONSTRAINT_NAME
And pk.CONSTRAINT_TYPE = 'PRIMARY KEY'
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE fk_cols
on ref_const.CONSTRAINT_NAME = fk_cols.CONSTRAINT_NAME
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE pk_cols
on pk.CONSTRAINT_NAME = pk_cols.CONSTRAINT_NAME
where
fk.TABLE_NAME = %s"""
cursor.execute(sql,[table_name])
relations = cursor.fetchall()
key_columns = []
key_columns.extend([(source_column, target_table, target_column) \
for source_column, target_table, target_column in relations])
return key_columns
|
lionheart/django-pyodbc | django_pyodbc/introspection.py | DatabaseIntrospection.get_relations | python | def get_relations(self, cursor, table_name):
# CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
# CONSTRAINT_TABLE_USAGE: http://msdn2.microsoft.com/en-us/library/ms179883.aspx
# REFERENTIAL_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms179987.aspx
# TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
table_index = self._name_to_index(cursor, table_name)
sql = """
SELECT e.COLUMN_NAME AS column_name,
c.TABLE_NAME AS referenced_table_name,
d.COLUMN_NAME AS referenced_column_name
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS b
ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_TABLE_USAGE AS c
ON b.UNIQUE_CONSTRAINT_NAME = c.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS d
ON c.CONSTRAINT_NAME = d.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS e
ON a.CONSTRAINT_NAME = e.CONSTRAINT_NAME
WHERE a.TABLE_NAME = %s AND a.CONSTRAINT_TYPE = 'FOREIGN KEY'"""
cursor.execute(sql, (table_name,))
return dict([(table_index[item[0]], (self._name_to_index(cursor, item[1])[item[2]], item[1]))
for item in cursor.fetchall()]) | Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/introspection.py#L146-L173 | null | class DatabaseIntrospection(BaseDatabaseIntrospection):
# Map type codes to Django Field types.
data_types_reverse = {
SQL_AUTOFIELD: 'IntegerField',
Database.SQL_BIGINT: 'BigIntegerField',
Database.SQL_BINARY: 'BinaryField',
Database.SQL_BIT: 'NullBooleanField',
Database.SQL_CHAR: 'CharField',
Database.SQL_DECIMAL: 'DecimalField',
Database.SQL_DOUBLE: 'FloatField',
Database.SQL_FLOAT: 'FloatField',
Database.SQL_GUID: 'TextField',
Database.SQL_INTEGER: 'IntegerField',
Database.SQL_LONGVARBINARY: 'BinaryField',
#Database.SQL_LONGVARCHAR: ,
Database.SQL_NUMERIC: 'DecimalField',
Database.SQL_REAL: 'FloatField',
Database.SQL_SMALLINT: 'SmallIntegerField',
Database.SQL_TINYINT: 'SmallIntegerField',
Database.SQL_TYPE_DATE: 'DateField',
Database.SQL_TYPE_TIME: 'TimeField',
Database.SQL_TYPE_TIMESTAMP: 'DateTimeField',
Database.SQL_VARBINARY: 'BinaryField',
Database.SQL_VARCHAR: 'TextField',
Database.SQL_WCHAR: 'CharField',
Database.SQL_WLONGVARCHAR: 'TextField',
Database.SQL_WVARCHAR: 'TextField',
}
def get_table_list(self, cursor):
"""
Returns a list of table names in the current database.
"""
# TABLES: http://msdn2.microsoft.com/en-us/library/ms186224.aspx
# TODO: Believe the below queries should actually select `TABLE_NAME, TABLE_TYPE`
if cursor.db_wrpr.limit_table_list:
cursor.execute("SELECT TABLE_NAME, 't' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE' AND TABLE_SCHEMA = 'dbo'")
else:
cursor.execute("SELECT TABLE_NAME, 't' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE'")
return [row_to_table_info(row) for row in cursor.fetchall()]
def _is_auto_field(self, cursor, table_name, column_name):
"""
Checks whether column is Identity
"""
# COLUMNPROPERTY: http://msdn2.microsoft.com/en-us/library/ms174968.aspx
#from django.db import connection
#cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
# (connection.ops.quote_name(table_name), column_name))
cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
(self.connection.ops.quote_name(table_name), column_name))
return cursor.fetchall()[0][0]
def get_table_description(self, cursor, table_name, identity_check=True):
"""Returns a description of the table, with DB-API cursor.description interface.
The 'auto_check' parameter has been added to the function argspec.
If set to True, the function will check each of the table's fields for the
IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
When a field is found with an IDENTITY property, it is given a custom field number
of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict.
"""
# map pyodbc's cursor.columns to db-api cursor description
columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.columns(table=table_name)]
items = []
for column in columns:
if identity_check and self._is_auto_field(cursor, table_name, column[0]):
column[1] = SQL_AUTOFIELD
# The conversion from TextField to CharField below is unwise.
# A SQLServer db field of type "Text" is not interchangeable with a CharField, no matter how short its max_length.
# For example, model.objects.values(<text_field_name>).count() will fail on a sqlserver 'text' field
if column[1] == Database.SQL_WVARCHAR and column[3] < 4000:
column[1] = Database.SQL_WCHAR
items.append(column)
return items
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, identity_check=False))])
def get_indexes(self, cursor, table_name):
# Returns a dictionary of fieldname -> infodict for the given table,
# where each infodict is in the format:
# {'primary_key': boolean representing whether it's the primary key,
# 'unique': boolean representing whether it's a unique index}
sql = """
select
C.name as [column_name],
IX.is_unique as [unique],
IX.is_primary_key as [primary_key]
from
sys.tables T
join sys.index_columns IC on IC.object_id = T.object_id
join sys.columns C on C.object_id = T.object_id and C.column_id = IC.column_id
join sys.indexes IX on IX.object_id = T.object_id and IX.index_id = IC.index_id
where
T.name = %s
-- Omit multi-column keys
and not exists (
select *
from sys.index_columns cols
where
cols.object_id = T.object_id
and cols.index_id = IC.index_id
and cols.key_ordinal > 1
)
"""
cursor.execute(sql,[table_name])
constraints = cursor.fetchall()
indexes = dict()
for column_name, unique, primary_key in constraints:
indexes[column_name.lower()] = {"primary_key":primary_key, "unique":unique}
return indexes
#def get_collations_list(self, cursor):
# """
# Returns list of available collations and theirs descriptions.
# """
# # http://msdn2.microsoft.com/en-us/library/ms184391.aspx
# # http://msdn2.microsoft.com/en-us/library/ms179886.aspx
#
# cursor.execute("SELECT name, description FROM ::fn_helpcollations()")
# return [tuple(row) for row in cursor.fetchall()]
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
source_field_dict = self._name_to_index(cursor, table_name)
sql = """
select
COLUMN_NAME = fk_cols.COLUMN_NAME,
REFERENCED_TABLE_NAME = pk.TABLE_NAME,
REFERENCED_COLUMN_NAME = pk_cols.COLUMN_NAME
from INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS ref_const
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS fk
on ref_const.CONSTRAINT_CATALOG = fk.CONSTRAINT_CATALOG
and ref_const.CONSTRAINT_SCHEMA = fk.CONSTRAINT_SCHEMA
and ref_const.CONSTRAINT_NAME = fk.CONSTRAINT_NAME
and fk.CONSTRAINT_TYPE = 'FOREIGN KEY'
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS pk
on ref_const.UNIQUE_CONSTRAINT_CATALOG = pk.CONSTRAINT_CATALOG
and ref_const.UNIQUE_CONSTRAINT_SCHEMA = pk.CONSTRAINT_SCHEMA
and ref_const.UNIQUE_CONSTRAINT_NAME = pk.CONSTRAINT_NAME
And pk.CONSTRAINT_TYPE = 'PRIMARY KEY'
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE fk_cols
on ref_const.CONSTRAINT_NAME = fk_cols.CONSTRAINT_NAME
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE pk_cols
on pk.CONSTRAINT_NAME = pk_cols.CONSTRAINT_NAME
where
fk.TABLE_NAME = %s"""
cursor.execute(sql,[table_name])
relations = cursor.fetchall()
key_columns = []
key_columns.extend([(source_column, target_table, target_column) \
for source_column, target_table, target_column in relations])
return key_columns
|
lionheart/django-pyodbc | django_pyodbc/introspection.py | DatabaseIntrospection.get_key_columns | python | def get_key_columns(self, cursor, table_name):
source_field_dict = self._name_to_index(cursor, table_name)
sql = """
select
COLUMN_NAME = fk_cols.COLUMN_NAME,
REFERENCED_TABLE_NAME = pk.TABLE_NAME,
REFERENCED_COLUMN_NAME = pk_cols.COLUMN_NAME
from INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS ref_const
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS fk
on ref_const.CONSTRAINT_CATALOG = fk.CONSTRAINT_CATALOG
and ref_const.CONSTRAINT_SCHEMA = fk.CONSTRAINT_SCHEMA
and ref_const.CONSTRAINT_NAME = fk.CONSTRAINT_NAME
and fk.CONSTRAINT_TYPE = 'FOREIGN KEY'
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS pk
on ref_const.UNIQUE_CONSTRAINT_CATALOG = pk.CONSTRAINT_CATALOG
and ref_const.UNIQUE_CONSTRAINT_SCHEMA = pk.CONSTRAINT_SCHEMA
and ref_const.UNIQUE_CONSTRAINT_NAME = pk.CONSTRAINT_NAME
And pk.CONSTRAINT_TYPE = 'PRIMARY KEY'
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE fk_cols
on ref_const.CONSTRAINT_NAME = fk_cols.CONSTRAINT_NAME
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE pk_cols
on pk.CONSTRAINT_NAME = pk_cols.CONSTRAINT_NAME
where
fk.TABLE_NAME = %s"""
cursor.execute(sql,[table_name])
relations = cursor.fetchall()
key_columns = []
key_columns.extend([(source_column, target_table, target_column) \
for source_column, target_table, target_column in relations])
return key_columns | Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/introspection.py#L221-L260 | null | class DatabaseIntrospection(BaseDatabaseIntrospection):
# Map type codes to Django Field types.
data_types_reverse = {
SQL_AUTOFIELD: 'IntegerField',
Database.SQL_BIGINT: 'BigIntegerField',
Database.SQL_BINARY: 'BinaryField',
Database.SQL_BIT: 'NullBooleanField',
Database.SQL_CHAR: 'CharField',
Database.SQL_DECIMAL: 'DecimalField',
Database.SQL_DOUBLE: 'FloatField',
Database.SQL_FLOAT: 'FloatField',
Database.SQL_GUID: 'TextField',
Database.SQL_INTEGER: 'IntegerField',
Database.SQL_LONGVARBINARY: 'BinaryField',
#Database.SQL_LONGVARCHAR: ,
Database.SQL_NUMERIC: 'DecimalField',
Database.SQL_REAL: 'FloatField',
Database.SQL_SMALLINT: 'SmallIntegerField',
Database.SQL_TINYINT: 'SmallIntegerField',
Database.SQL_TYPE_DATE: 'DateField',
Database.SQL_TYPE_TIME: 'TimeField',
Database.SQL_TYPE_TIMESTAMP: 'DateTimeField',
Database.SQL_VARBINARY: 'BinaryField',
Database.SQL_VARCHAR: 'TextField',
Database.SQL_WCHAR: 'CharField',
Database.SQL_WLONGVARCHAR: 'TextField',
Database.SQL_WVARCHAR: 'TextField',
}
def get_table_list(self, cursor):
"""
Returns a list of table names in the current database.
"""
# TABLES: http://msdn2.microsoft.com/en-us/library/ms186224.aspx
# TODO: Believe the below queries should actually select `TABLE_NAME, TABLE_TYPE`
if cursor.db_wrpr.limit_table_list:
cursor.execute("SELECT TABLE_NAME, 't' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE' AND TABLE_SCHEMA = 'dbo'")
else:
cursor.execute("SELECT TABLE_NAME, 't' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE'")
return [row_to_table_info(row) for row in cursor.fetchall()]
def _is_auto_field(self, cursor, table_name, column_name):
"""
Checks whether column is Identity
"""
# COLUMNPROPERTY: http://msdn2.microsoft.com/en-us/library/ms174968.aspx
#from django.db import connection
#cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
# (connection.ops.quote_name(table_name), column_name))
cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
(self.connection.ops.quote_name(table_name), column_name))
return cursor.fetchall()[0][0]
def get_table_description(self, cursor, table_name, identity_check=True):
"""Returns a description of the table, with DB-API cursor.description interface.
The 'auto_check' parameter has been added to the function argspec.
If set to True, the function will check each of the table's fields for the
IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
When a field is found with an IDENTITY property, it is given a custom field number
of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict.
"""
# map pyodbc's cursor.columns to db-api cursor description
columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.columns(table=table_name)]
items = []
for column in columns:
if identity_check and self._is_auto_field(cursor, table_name, column[0]):
column[1] = SQL_AUTOFIELD
# The conversion from TextField to CharField below is unwise.
# A SQLServer db field of type "Text" is not interchangeable with a CharField, no matter how short its max_length.
# For example, model.objects.values(<text_field_name>).count() will fail on a sqlserver 'text' field
if column[1] == Database.SQL_WVARCHAR and column[3] < 4000:
column[1] = Database.SQL_WCHAR
items.append(column)
return items
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, identity_check=False))])
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
# CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
# CONSTRAINT_TABLE_USAGE: http://msdn2.microsoft.com/en-us/library/ms179883.aspx
# REFERENTIAL_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms179987.aspx
# TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
table_index = self._name_to_index(cursor, table_name)
sql = """
SELECT e.COLUMN_NAME AS column_name,
c.TABLE_NAME AS referenced_table_name,
d.COLUMN_NAME AS referenced_column_name
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS b
ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_TABLE_USAGE AS c
ON b.UNIQUE_CONSTRAINT_NAME = c.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS d
ON c.CONSTRAINT_NAME = d.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS e
ON a.CONSTRAINT_NAME = e.CONSTRAINT_NAME
WHERE a.TABLE_NAME = %s AND a.CONSTRAINT_TYPE = 'FOREIGN KEY'"""
cursor.execute(sql, (table_name,))
return dict([(table_index[item[0]], (self._name_to_index(cursor, item[1])[item[2]], item[1]))
for item in cursor.fetchall()])
def get_indexes(self, cursor, table_name):
# Returns a dictionary of fieldname -> infodict for the given table,
# where each infodict is in the format:
# {'primary_key': boolean representing whether it's the primary key,
# 'unique': boolean representing whether it's a unique index}
sql = """
select
C.name as [column_name],
IX.is_unique as [unique],
IX.is_primary_key as [primary_key]
from
sys.tables T
join sys.index_columns IC on IC.object_id = T.object_id
join sys.columns C on C.object_id = T.object_id and C.column_id = IC.column_id
join sys.indexes IX on IX.object_id = T.object_id and IX.index_id = IC.index_id
where
T.name = %s
-- Omit multi-column keys
and not exists (
select *
from sys.index_columns cols
where
cols.object_id = T.object_id
and cols.index_id = IC.index_id
and cols.key_ordinal > 1
)
"""
cursor.execute(sql,[table_name])
constraints = cursor.fetchall()
indexes = dict()
for column_name, unique, primary_key in constraints:
indexes[column_name.lower()] = {"primary_key":primary_key, "unique":unique}
return indexes
#def get_collations_list(self, cursor):
# """
# Returns list of available collations and theirs descriptions.
# """
# # http://msdn2.microsoft.com/en-us/library/ms184391.aspx
# # http://msdn2.microsoft.com/en-us/library/ms179886.aspx
#
# cursor.execute("SELECT name, description FROM ::fn_helpcollations()")
# return [tuple(row) for row in cursor.fetchall()]
|
lionheart/django-pyodbc | django_pyodbc/compiler.py | _break | python | def _break(s, find):
i = s.find(find)
return s[:i], s[i:] | Break a string s into the part before the substring to find,
and the part including and after the substring. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/compiler.py#L107-L111 | null | # Copyright 2013-2017 Lionheart Software LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2008, django-pyodbc developers (see README.rst).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of django-sql-server nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import types
from datetime import date, datetime
import django
from django import VERSION as DjangoVersion
from django.db.models.sql import compiler, where
from django_pyodbc.compat import zip_longest
REV_ODIR = {
'ASC': 'DESC',
'DESC': 'ASC'
}
SQL_SERVER_8_LIMIT_QUERY = \
"""SELECT *
FROM (
SELECT TOP %(limit)s *
FROM (
%(orig_sql)s
ORDER BY %(ord)s
) AS %(table)s
ORDER BY %(rev_ord)s
) AS %(table)s
ORDER BY %(ord)s"""
SQL_SERVER_8_NO_LIMIT_QUERY = \
"""SELECT *
FROM %(table)s
WHERE %(key)s NOT IN (
%(orig_sql)s
ORDER BY %(ord)s
)"""
# Strategies for handling limit+offset emulation:
USE_ROW_NUMBER = 0 # For SQL Server >= 2005
USE_TOP_HMARK = 1 # For SQL Server 2000 when both limit and offset are provided
USE_TOP_LMARK = 2 # For SQL Server 2000 when offset but no limit is provided
# Pattern to scan a column data type string and split the data type from any
# constraints or other included parts of a column definition. Based upon
# <column_definition> from http://msdn.microsoft.com/en-us/library/ms174979.aspx
_re_data_type_terminator = re.compile(
r'\s*\b(?:' +
r'filestream|collate|sparse|not|null|constraint|default|identity|rowguidcol' +
r'|primary|unique|clustered|nonclustered|with|on|foreign|references|check' +
')',
re.IGNORECASE,
)
_re_order_limit_offset = re.compile(
r'(?:ORDER BY\s+(.+?))?\s*(?:LIMIT\s+(\d+))?\s*(?:OFFSET\s+(\d+))?$')
# Pattern used in column aliasing to find sub-select placeholders
_re_col_placeholder = re.compile(r'\{_placeholder_(\d+)\}')
_re_find_order_direction = re.compile(r'\s+(asc|desc)\s*$', re.IGNORECASE)
def _remove_order_limit_offset(sql):
    """Strip any trailing ORDER BY / LIMIT / OFFSET clause from *sql*, then
    drop the leading keyword (normally ``SELECT``) and return the rest."""
    without_clauses = _re_order_limit_offset.sub('', sql)
    _keyword, remainder = without_clauses.split(None, 1)
    return remainder
def _get_order_limit_offset(sql):
    """Return the ``(order_by, limit, offset)`` groups captured at the end
    of *sql*; each element is ``None`` when the clause is absent."""
    match = _re_order_limit_offset.search(sql)
    return match.groups()
def where_date(self, compiler, connection):
    """Vendor-specific ``as_microsoft`` replacement installed on WHERE
    nodes that compare against a plain ``datetime.date``.

    Re-emits the node's compiled SQL with the bare ``date`` object as the
    single bound parameter, so the driver binds it as a date value.

    Returns ``[sql, [date_param]]``; returns ``None`` if neither side of
    the lookup is a ``date`` (callers only install this hook when one is).

    Raises:
        ValueError: if the compiled condition has more than one parameter.
    """
    query, data = self.as_sql(compiler, connection)
    if len(data) != 1:
        # I don't think this can happen but I'm adding an exception just in case.
        # BUG FIX: previously raised the undefined name ``Error``, which would
        # have surfaced as a NameError instead of a meaningful exception.
        raise ValueError('Multiple data items in date condition')
    # ``type(...) is date`` deliberately excludes datetime (a date subclass),
    # matching the original exact-type check.
    if type(self.rhs) is date:
        return [query, [self.rhs]]
    elif type(self.lhs) is date:
        return [query, [self.lhs]]
class SQLCompiler(compiler.SQLCompiler):
    def __init__(self,*args,**kwargs):
        """Initialize the compiler and pre-build the backend-specific
        quoted-column regex used when aliasing duplicate column names."""
        super(SQLCompiler,self).__init__(*args,**kwargs)
        # Pattern to find the quoted column name at the end of a field
        # specification
        #
        # E.g., if you're talking to MS SQL this regex would become
        #     \[([^\[]+)\]$
        #
        # This would match the underlined part of the following string:
        #     [foo_table][bar_column]
        #                ^^^^^^^^^^^^
        # The quote characters come from the connection's ops object so the
        # same compiler works for brackets (MSSQL), braces (DB2), etc.
        self._re_pat_col = re.compile(
            r"\{left_sql_quote}([^\{left_sql_quote}]+)\{right_sql_quote}$".format(
                left_sql_quote=self.connection.ops.left_sql_quote,
                right_sql_quote=self.connection.ops.right_sql_quote))
    def compile(self, node, select_format=False):
        """Compile *node*, first patching date-comparison WHERE children for
        OpenEdge, which needs bare ``date`` parameters (see ``where_date``)."""
        if self.connection.ops.is_openedge and type(node) is where.WhereNode:
            for val in node.children:
                # If we too many more of these special cases we should probably move them to another file
                if type(val.rhs) == date or type(val.lhs) == date:
                    # Bind where_date as an ``as_microsoft`` method on the
                    # node -- presumably the vendor hook name Django's
                    # compile() dispatches to for this backend; confirm
                    # against the connection's vendor string.
                    setattr(val, 'as_microsoft', types.MethodType(where_date, val))
        args = [node]
        if select_format:
            # Only forward select_format when set: older Django versions'
            # compile() does not accept a second argument.
            args.append(select_format)
        return super(SQLCompiler, self).compile(*args)
    def resolve_columns(self, row, fields=()):
        """Post-process a raw result *row*, converting DB-level values into
        the Python values Django expects for each model field."""
        # If the results are sliced, the resultset will have an initial
        # "row number" column. Remove this column before the ORM sees it.
        if getattr(self, '_using_row_number', False):
            row = row[1:]
        values = []
        index_extra_select = len(self.query.extra_select)
        for value, field in zip_longest(row[index_extra_select:], fields):
            # print '\tfield=%s\tvalue=%s' % (repr(field), repr(value))
            if field:
                try:
                    value = self.connection.ops.convert_values(value, field)
                except ValueError:
                    # Keep the raw value when backend conversion fails.
                    pass
            values.append(value)
        # Extra-select columns pass through untouched; converted values follow.
        return row[:index_extra_select] + tuple(values)
    def _fix_aggregates(self):
        """
        MSSQL doesn't match the behavior of the other backends on a few of
        the aggregate functions; different return type behavior, different
        function names, etc.

        MSSQL's implementation of AVG maintains datatype without proding. To
        match behavior of other django backends, it needs to not drop remainders.
        E.g. AVG([1, 2]) needs to yield 1.5, not 1
        """
        try:
            # for django 1.10 and up (works starting in 1.8 so I am told)
            select = self.query.annotation_select
        except AttributeError:
            # older
            select = self.query.aggregate_select
        for alias, aggregate in select.items():
            # Expression-style annotations carry no ``sql_function``
            # attribute; leave those untouched.
            if not hasattr(aggregate, 'sql_function'):
                continue
            if aggregate.sql_function == 'AVG':# and self.connection.cast_avg_to_float:
                # Embed the CAST in the template on this query to
                # maintain multi-db support.
                select[alias].sql_template = \
                    '%(function)s(CAST(%(field)s AS FLOAT))'
            # translate StdDev function names
            elif aggregate.sql_function == 'STDDEV_SAMP':
                select[alias].sql_function = 'STDEV'
            elif aggregate.sql_function == 'STDDEV_POP':
                select[alias].sql_function = 'STDEVP'
            # translate Variance function names
            elif aggregate.sql_function == 'VAR_SAMP':
                select[alias].sql_function = 'VAR'
            elif aggregate.sql_function == 'VAR_POP':
                select[alias].sql_function = 'VARP'
def as_sql(self, with_limits=True, with_col_aliases=False, qn=None, **kwargs):
self.pre_sql_setup()
# Django #12192 - Don't execute any DB query when QS slicing results in limit 0
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self._fix_aggregates()
self._using_row_number = False
# Get out of the way if we're not a select query or there's no limiting involved.
check_limits = with_limits and (self.query.low_mark or self.query.high_mark is not None)
if not check_limits:
# The ORDER BY clause is invalid in views, inline functions,
# derived tables, subqueries, and common table expressions,
# unless TOP or FOR XML is also specified.
try:
setattr(self.query, '_mssql_ordering_not_allowed', with_col_aliases)
result = super(SQLCompiler, self).as_sql(with_limits, with_col_aliases, **kwargs)
finally:
# remove in case query is every reused
delattr(self.query, '_mssql_ordering_not_allowed')
return result
raw_sql, fields = super(SQLCompiler, self).as_sql(False, with_col_aliases, **kwargs)
# Check for high mark only and replace with "TOP"
if self.query.high_mark is not None and not self.query.low_mark:
if self.connection.ops.is_db2:
sql = self._select_top('', raw_sql, self.query.high_mark)
else:
_select = 'SELECT'
if self.query.distinct:
_select += ' DISTINCT'
sql = re.sub(r'(?i)^{0}'.format(_select), '{0} TOP {1}'.format(_select, self.query.high_mark), raw_sql, 1)
return sql, fields
# Else we have limits; rewrite the query using ROW_NUMBER()
self._using_row_number = True
# Lop off ORDER... and the initial "SELECT"
inner_select = _remove_order_limit_offset(raw_sql)
outer_fields, inner_select = self._alias_columns(inner_select)
order = _get_order_limit_offset(raw_sql)[0]
qn = self.connection.ops.quote_name
inner_table_name = qn('AAAA')
outer_fields, inner_select, order = self._fix_slicing_order(outer_fields, inner_select, order, inner_table_name)
# map a copy of outer_fields for injected subselect
f = []
for x in outer_fields.split(','):
i = x.upper().find(' AS ')
if i != -1:
x = x[i+4:]
if x.find('.') != -1:
tbl, col = x.rsplit('.', 1)
else:
col = x
f.append('{0}.{1}'.format(inner_table_name, col.strip()))
# inject a subselect to get around OVER requiring ORDER BY to come from FROM
inner_select = '{fields} FROM ( SELECT {inner} ) AS {inner_as}'.format(
fields=', '.join(f),
inner=inner_select,
inner_as=inner_table_name,
)
# IBM's DB2 cannot have a prefix of `_` for column names
row_num_col = 'django_pyodbc_row_num' if self.connection.ops.is_db2 else '_row_num'
where_row_num = '{0} < {row_num_col}'.format(self.query.low_mark, row_num_col=row_num_col)
if self.query.high_mark:
where_row_num += ' and {row_num_col} <= {0}'.format(self.query.high_mark, row_num_col=row_num_col)
# SQL Server 2000 doesn't support the `ROW_NUMBER()` function, thus it
# is necessary to use the `TOP` construct with `ORDER BY` so we can
# slice out a particular range of results.
if self.connection.ops.sql_server_ver < 2005 and not self.connection.ops.is_db2:
num_to_select = self.query.high_mark - self.query.low_mark
order_by_col_with_prefix,order_direction = order.rsplit(' ',1)
order_by_col = order_by_col_with_prefix.rsplit('.',1)[-1]
opposite_order_direction = REV_ODIR[order_direction]
sql = r'''
SELECT
1, -- placeholder for _row_num
* FROM
(
SELECT TOP
-- num_to_select
{num_to_select}
*
FROM
(
SELECT TOP
-- high_mark
{high_mark}
-- inner
{inner}
ORDER BY (
-- order_by_col
{left_sql_quote}AAAA{right_sql_quote}.{order_by_col}
)
-- order_direction
{order_direction}
) AS BBBB ORDER BY ({left_sql_quote}BBBB{right_sql_quote}.{order_by_col}) {opposite_order_direction}
) AS QQQQ ORDER BY ({left_sql_quote}QQQQ{right_sql_quote}.{order_by_col}) {order_direction}
'''.format(
inner=inner_select,
num_to_select=num_to_select,
high_mark=self.query.high_mark,
order_by_col=order_by_col,
order_direction=order_direction,
opposite_order_direction=opposite_order_direction,
left_sql_quote=self.connection.ops.left_sql_quote,
right_sql_quote=self.connection.ops.right_sql_quote,
)
else:
sql = "SELECT {outer}, {row_num_col} FROM ( SELECT ROW_NUMBER() OVER ( ORDER BY {order}) as {row_num_col}, {inner}) as QQQ where {where}".format(
outer=outer_fields,
order=order,
inner=inner_select,
where=where_row_num,
row_num_col=row_num_col
)
return sql, fields
def _select_top(self,select,inner_sql,number_to_fetch):
if self.connection.ops.is_db2:
return "{select} {inner_sql} FETCH FIRST {number_to_fetch} ROWS ONLY".format(
select=select, inner_sql=inner_sql, number_to_fetch=number_to_fetch)
else:
return "{select} TOP {number_to_fetch} {inner_sql}".format(
select=select, inner_sql=inner_sql, number_to_fetch=number_to_fetch)
def _fix_slicing_order(self, outer_fields, inner_select, order, inner_table_name):
"""
Apply any necessary fixes to the outer_fields, inner_select, and order
strings due to slicing.
"""
# Using ROW_NUMBER requires an ordering
if order is None:
meta = self.query.get_meta()
column = meta.pk.db_column or meta.pk.get_attname()
order = '{0}.{1} ASC'.format(
inner_table_name,
self.connection.ops.quote_name(column),
)
else:
alias_id = 0
# remap order for injected subselect
new_order = []
for x in order.split(','):
# find the ordering direction
m = _re_find_order_direction.search(x)
if m:
direction = m.groups()[0]
else:
direction = 'ASC'
# remove the ordering direction
x = _re_find_order_direction.sub('', x)
# remove any namespacing or table name from the column name
col = x.rsplit('.', 1)[-1]
# Is the ordering column missing from the inner select?
# 'inner_select' contains the full query without the leading 'SELECT '.
# It's possible that this can get a false hit if the ordering
# column is used in the WHERE while not being in the SELECT. It's
# not worth the complexity to properly handle that edge case.
if x not in inner_select:
# Ordering requires the column to be selected by the inner select
alias_id += 1
# alias column name
col = '{left_sql_quote}{0}___o{1}{right_sql_quote}'.format(
col.strip(self.connection.ops.left_sql_quote+self.connection.ops.right_sql_quote),
alias_id,
left_sql_quote=self.connection.ops.left_sql_quote,
right_sql_quote=self.connection.ops.right_sql_quote,
)
# add alias to inner_select
inner_select = '({0}) AS {1}, {2}'.format(x, col, inner_select)
new_order.append('{0}.{1} {2}'.format(inner_table_name, col, direction))
order = ', '.join(new_order)
return outer_fields, inner_select, order
def _alias_columns(self, sql):
"""Return tuple of SELECT and FROM clauses, aliasing duplicate column names."""
qn = self.connection.ops.quote_name
outer = list()
inner = list()
names_seen = list()
# replace all parens with placeholders
paren_depth, paren_buf = 0, ['']
parens, i = {}, 0
for ch in sql:
if ch == '(':
i += 1
paren_depth += 1
paren_buf.append('')
elif ch == ')':
paren_depth -= 1
key = '_placeholder_{0}'.format(i)
buf = paren_buf.pop()
# store the expanded paren string
buf = re.sub(r'%([^\(])', r'$$$\1', buf)
parens[key] = buf% parens
parens[key] = re.sub(r'\$\$\$([^\(])', r'%\1', parens[key])
#cannot use {} because IBM's DB2 uses {} as quotes
paren_buf[paren_depth] += '(%(' + key + ')s)'
else:
paren_buf[paren_depth] += ch
def _replace_sub(col):
"""Replace all placeholders with expanded values"""
while _re_col_placeholder.search(col):
col = col.format(**parens)
return col
temp_sql = ''.join(paren_buf)
# replace any bare %s with placeholders. Needed when the WHERE
# clause only contains one condition, and isn't wrapped in parens.
# the placeholder_data is used to prevent the variable "i" from
# being interpreted as a local variable in the replacement function
placeholder_data = { "i": i }
def _alias_placeholders(val):
i = placeholder_data["i"]
i += 1
placeholder_data["i"] = i
key = "_placeholder_{0}".format(i)
parens[key] = "%s"
return "%(" + key + ")s"
temp_sql = re.sub("%s", _alias_placeholders, temp_sql)
select_list, from_clause = _break(temp_sql, ' FROM ' + self.connection.ops.left_sql_quote)
for col in [x.strip() for x in select_list.split(',')]:
match = self._re_pat_col.search(col)
if match:
col_name = match.group(1)
col_key = col_name.lower()
if col_key in names_seen:
alias = qn('{0}___{1}'.format(col_name, names_seen.count(col_key)))
outer.append(alias)
inner.append('{0} as {1}'.format(_replace_sub(col), alias))
else:
outer.append(qn(col_name))
inner.append(_replace_sub(col))
names_seen.append(col_key)
else:
raise Exception('Unable to find a column name when parsing SQL: {0}'.format(col))
return ', '.join(outer), ', '.join(inner) + (from_clause % parens)
# ^^^^^^^^^^^^^^^^^^^^^
# We can't use `format` here, because `format` uses `{}` as special
# characters, but those happen to also be the quoting tokens for IBM's
# DB2
    def get_ordering(self):
        # The ORDER BY clause is invalid in views, inline functions,
        # derived tables, subqueries, and common table expressions,
        # unless TOP or FOR XML is also specified.
        if getattr(self.query, '_mssql_ordering_not_allowed', False):
            # as_sql() sets this flag on the query while compiling in a
            # context where ORDER BY is illegal; return an empty ordering
            # in the tuple shape the running Django version expects
            # (2-tuple before 1.6, 3-tuple from 1.6 on).
            if django.VERSION[0] == 1 and django.VERSION[1] < 6:
                return (None, [])
            return (None, [], [])
        return super(SQLCompiler, self).get_ordering()
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
# search for after table/column list
_re_values_sub = re.compile(r'(?P<prefix>\)|\])(?P<default>\s*|\s*default\s*)values(?P<suffix>\s*|\s+\()?', re.IGNORECASE)
# ... and insert the OUTPUT clause between it and the values list (or DEFAULT VALUES).
_values_repl = r'\g<prefix> OUTPUT INSERTED.{col} INTO @sqlserver_ado_return_id\g<default>VALUES\g<suffix>'
def as_sql(self, *args, **kwargs):
# Fix for Django ticket #14019
if not hasattr(self, 'return_id'):
self.return_id = False
result = super(SQLInsertCompiler, self).as_sql(*args, **kwargs)
if isinstance(result, list):
# Django 1.4 wraps return in list
return [self._fix_insert(x[0], x[1]) for x in result]
sql, params = result
return self._fix_insert(sql, params)
def _fix_insert(self, sql, params):
"""
Wrap the passed SQL with IDENTITY_INSERT statements and apply
other necessary fixes.
"""
meta = self.query.get_meta()
if meta.has_auto_field:
if hasattr(self.query, 'fields'):
# django 1.4 replaced columns with fields
fields = self.query.fields
auto_field = meta.auto_field
else:
# < django 1.4
fields = self.query.columns
auto_field = meta.auto_field.db_column or meta.auto_field.column
auto_in_fields = auto_field in fields
quoted_table = self.connection.ops.quote_name(meta.db_table)
if not fields or (auto_in_fields and len(fields) == 1 and not params):
# convert format when inserting only the primary key without
# specifying a value
sql = 'INSERT INTO {0} DEFAULT VALUES'.format(
quoted_table
)
params = []
elif auto_in_fields:
# wrap with identity insert
sql = 'SET IDENTITY_INSERT {table} ON;{sql};SET IDENTITY_INSERT {table} OFF'.format(
table=quoted_table,
sql=sql,
)
# mangle SQL to return ID from insert
# http://msdn.microsoft.com/en-us/library/ms177564.aspx
if self.return_id and self.connection.features.can_return_id_from_insert:
col = self.connection.ops.quote_name(meta.pk.db_column or meta.pk.get_attname())
# Determine datatype for use with the table variable that will return the inserted ID
pk_db_type = _re_data_type_terminator.split(meta.pk.db_type(self.connection))[0]
# NOCOUNT ON to prevent additional trigger/stored proc related resultsets
sql = 'SET NOCOUNT ON;{declare_table_var};{sql};{select_return_id}'.format(
sql=sql,
declare_table_var="DECLARE @sqlserver_ado_return_id table ({col_name} {pk_type})".format(
col_name=col,
pk_type=pk_db_type,
),
select_return_id="SELECT * FROM @sqlserver_ado_return_id",
)
output = self._values_repl.format(col=col)
sql = self._re_values_sub.sub(output, sql)
return sql, params
class SQLInsertCompiler2(compiler.SQLInsertCompiler, SQLCompiler):
def as_sql_legacy(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
returns_id = bool(self.return_id and
self.connection.features.can_return_id_from_insert)
result = ['INSERT INTO %s' % qn(opts.db_table)]
result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
if returns_id:
result.append('OUTPUT inserted.%s' % qn(opts.pk.column))
values = [self.placeholder(*v) for v in self.query.values]
result.append('VALUES (%s)' % ', '.join(values))
params = self.query.params
sql = ' '.join(result)
meta = self.query.get_meta()
if meta.has_auto_field:
# db_column is None if not explicitly specified by model field
auto_field_column = meta.auto_field.db_column or meta.auto_field.column
if auto_field_column in self.query.columns:
quoted_table = self.connection.ops.quote_name(meta.db_table)
if len(self.query.columns) == 1 and not params:
result = ['INSERT INTO %s' % quoted_table]
if returns_id:
result.append('OUTPUT inserted.%s' % qn(opts.pk.column))
result.append('DEFAULT VALUES')
sql = ' '.join(result)
else:
sql = "SET IDENTITY_INSERT %s ON;\n%s;\nSET IDENTITY_INSERT %s OFF" % \
(quoted_table, sql, quoted_table)
return sql, params
def as_sql(self):
if self.connection._DJANGO_VERSION < 14:
return self.as_sql_legacy()
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
columns = [f.column for f in fields]
result.append('(%s)' % ', '.join([qn(c) for c in columns]))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
output = 'OUTPUT inserted.%s' % qn(opts.pk.column)
result.append(output)
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
return [(" ".join(result), tuple(params))]
items = [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
# This section deals with specifically setting the primary key,
# or using default values if necessary
meta = self.query.get_meta()
if meta.has_auto_field:
# db_column is None if not explicitly specified by model field
auto_field_column = meta.auto_field.db_column or meta.auto_field.column
out = []
for sql, params in items:
if auto_field_column in columns:
quoted_table = self.connection.ops.quote_name(meta.db_table)
# If there are no fields specified in the insert..
if not has_fields:
sql = "INSERT INTO %s DEFAULT VALUES" % quoted_table
else:
sql = "SET IDENTITY_INSERT %s ON;\n%s;\nSET IDENTITY_INSERT %s OFF" % \
(quoted_table, sql, quoted_table)
out.append([sql, params])
items = out
return items
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
def as_sql(self, qn=None):
self._fix_aggregates()
return super(SQLAggregateCompiler, self).as_sql()
# django's compiler.SQLDateCompiler was removed in 1.8
if DjangoVersion[0] > 1 or DjangoVersion[0] == 1 and DjangoVersion[1] >= 8:
import warnings
class DeprecatedMeta(type):
def __new__(cls, name, bases, attrs):
# if the metaclass is defined on the current class, it's not
# a subclass so we don't want to warn.
if attrs.get('__metaclass__') is not cls:
msg = ('In the 1.8 release of django, `SQLDateCompiler` was ' +
'removed. This was a parent class of `' + name +
'`, and thus `' + name + '` needs to be changed.')
raise ImportError(msg)
return super(DeprecatedMeta, cls).__new__(cls, name, bases, attrs)
class SQLDateCompiler(object):
__metaclass__ = DeprecatedMeta
class SQLDateTimeCompiler(object):
__metaclass__ = DeprecatedMeta
else:
class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):
pass
class SQLDateTimeCompiler(compiler.SQLDateCompiler, SQLCompiler):
pass
|
lionheart/django-pyodbc | django_pyodbc/compiler.py | SQLCompiler._fix_aggregates | python | def _fix_aggregates(self):
try:
# for django 1.10 and up (works starting in 1.8 so I am told)
select = self.query.annotation_select
except AttributeError:
# older
select = self.query.aggregate_select
for alias, aggregate in select.items():
if not hasattr(aggregate, 'sql_function'):
continue
if aggregate.sql_function == 'AVG':# and self.connection.cast_avg_to_float:
# Embed the CAST in the template on this query to
# maintain multi-db support.
select[alias].sql_template = \
'%(function)s(CAST(%(field)s AS FLOAT))'
# translate StdDev function names
elif aggregate.sql_function == 'STDDEV_SAMP':
select[alias].sql_function = 'STDEV'
elif aggregate.sql_function == 'STDDEV_POP':
select[alias].sql_function = 'STDEVP'
# translate Variance function names
elif aggregate.sql_function == 'VAR_SAMP':
select[alias].sql_function = 'VAR'
elif aggregate.sql_function == 'VAR_POP':
select[alias].sql_function = 'VARP' | MSSQL doesn't match the behavior of the other backends on a few of
the aggregate functions; different return type behavior, different
function names, etc.
MSSQL's implementation of AVG maintains datatype without proding. To
match behavior of other django backends, it needs to not drop remainders.
E.g. AVG([1, 2]) needs to yield 1.5, not 1 | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/compiler.py#L171-L205 | null | class SQLCompiler(compiler.SQLCompiler):
def __init__(self,*args,**kwargs):
super(SQLCompiler,self).__init__(*args,**kwargs)
# Pattern to find the quoted column name at the end of a field
# specification
#
# E.g., if you're talking to MS SQL this regex would become
# \[([^\[]+)\]$
#
# This would match the underlined part of the following string:
# [foo_table][bar_column]
# ^^^^^^^^^^^^
self._re_pat_col = re.compile(
r"\{left_sql_quote}([^\{left_sql_quote}]+)\{right_sql_quote}$".format(
left_sql_quote=self.connection.ops.left_sql_quote,
right_sql_quote=self.connection.ops.right_sql_quote))
def compile(self, node, select_format=False):
if self.connection.ops.is_openedge and type(node) is where.WhereNode:
for val in node.children:
# If we too many more of these special cases we should probably move them to another file
if type(val.rhs) == date or type(val.lhs) == date:
setattr(val, 'as_microsoft', types.MethodType(where_date, val))
args = [node]
if select_format:
args.append(select_format)
return super(SQLCompiler, self).compile(*args)
def resolve_columns(self, row, fields=()):
# If the results are sliced, the resultset will have an initial
# "row number" column. Remove this column before the ORM sees it.
if getattr(self, '_using_row_number', False):
row = row[1:]
values = []
index_extra_select = len(self.query.extra_select)
for value, field in zip_longest(row[index_extra_select:], fields):
# print '\tfield=%s\tvalue=%s' % (repr(field), repr(value))
if field:
try:
value = self.connection.ops.convert_values(value, field)
except ValueError:
pass
values.append(value)
return row[:index_extra_select] + tuple(values)
def as_sql(self, with_limits=True, with_col_aliases=False, qn=None, **kwargs):
self.pre_sql_setup()
# Django #12192 - Don't execute any DB query when QS slicing results in limit 0
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self._fix_aggregates()
self._using_row_number = False
# Get out of the way if we're not a select query or there's no limiting involved.
check_limits = with_limits and (self.query.low_mark or self.query.high_mark is not None)
if not check_limits:
# The ORDER BY clause is invalid in views, inline functions,
# derived tables, subqueries, and common table expressions,
# unless TOP or FOR XML is also specified.
try:
setattr(self.query, '_mssql_ordering_not_allowed', with_col_aliases)
result = super(SQLCompiler, self).as_sql(with_limits, with_col_aliases, **kwargs)
finally:
# remove in case query is every reused
delattr(self.query, '_mssql_ordering_not_allowed')
return result
raw_sql, fields = super(SQLCompiler, self).as_sql(False, with_col_aliases, **kwargs)
# Check for high mark only and replace with "TOP"
if self.query.high_mark is not None and not self.query.low_mark:
if self.connection.ops.is_db2:
sql = self._select_top('', raw_sql, self.query.high_mark)
else:
_select = 'SELECT'
if self.query.distinct:
_select += ' DISTINCT'
sql = re.sub(r'(?i)^{0}'.format(_select), '{0} TOP {1}'.format(_select, self.query.high_mark), raw_sql, 1)
return sql, fields
# Else we have limits; rewrite the query using ROW_NUMBER()
self._using_row_number = True
# Lop off ORDER... and the initial "SELECT"
inner_select = _remove_order_limit_offset(raw_sql)
outer_fields, inner_select = self._alias_columns(inner_select)
order = _get_order_limit_offset(raw_sql)[0]
qn = self.connection.ops.quote_name
inner_table_name = qn('AAAA')
outer_fields, inner_select, order = self._fix_slicing_order(outer_fields, inner_select, order, inner_table_name)
# map a copy of outer_fields for injected subselect
f = []
for x in outer_fields.split(','):
i = x.upper().find(' AS ')
if i != -1:
x = x[i+4:]
if x.find('.') != -1:
tbl, col = x.rsplit('.', 1)
else:
col = x
f.append('{0}.{1}'.format(inner_table_name, col.strip()))
# inject a subselect to get around OVER requiring ORDER BY to come from FROM
inner_select = '{fields} FROM ( SELECT {inner} ) AS {inner_as}'.format(
fields=', '.join(f),
inner=inner_select,
inner_as=inner_table_name,
)
# IBM's DB2 cannot have a prefix of `_` for column names
row_num_col = 'django_pyodbc_row_num' if self.connection.ops.is_db2 else '_row_num'
where_row_num = '{0} < {row_num_col}'.format(self.query.low_mark, row_num_col=row_num_col)
if self.query.high_mark:
where_row_num += ' and {row_num_col} <= {0}'.format(self.query.high_mark, row_num_col=row_num_col)
# SQL Server 2000 doesn't support the `ROW_NUMBER()` function, thus it
# is necessary to use the `TOP` construct with `ORDER BY` so we can
# slice out a particular range of results.
if self.connection.ops.sql_server_ver < 2005 and not self.connection.ops.is_db2:
num_to_select = self.query.high_mark - self.query.low_mark
order_by_col_with_prefix,order_direction = order.rsplit(' ',1)
order_by_col = order_by_col_with_prefix.rsplit('.',1)[-1]
opposite_order_direction = REV_ODIR[order_direction]
sql = r'''
SELECT
1, -- placeholder for _row_num
* FROM
(
SELECT TOP
-- num_to_select
{num_to_select}
*
FROM
(
SELECT TOP
-- high_mark
{high_mark}
-- inner
{inner}
ORDER BY (
-- order_by_col
{left_sql_quote}AAAA{right_sql_quote}.{order_by_col}
)
-- order_direction
{order_direction}
) AS BBBB ORDER BY ({left_sql_quote}BBBB{right_sql_quote}.{order_by_col}) {opposite_order_direction}
) AS QQQQ ORDER BY ({left_sql_quote}QQQQ{right_sql_quote}.{order_by_col}) {order_direction}
'''.format(
inner=inner_select,
num_to_select=num_to_select,
high_mark=self.query.high_mark,
order_by_col=order_by_col,
order_direction=order_direction,
opposite_order_direction=opposite_order_direction,
left_sql_quote=self.connection.ops.left_sql_quote,
right_sql_quote=self.connection.ops.right_sql_quote,
)
else:
sql = "SELECT {outer}, {row_num_col} FROM ( SELECT ROW_NUMBER() OVER ( ORDER BY {order}) as {row_num_col}, {inner}) as QQQ where {where}".format(
outer=outer_fields,
order=order,
inner=inner_select,
where=where_row_num,
row_num_col=row_num_col
)
return sql, fields
def _select_top(self,select,inner_sql,number_to_fetch):
if self.connection.ops.is_db2:
return "{select} {inner_sql} FETCH FIRST {number_to_fetch} ROWS ONLY".format(
select=select, inner_sql=inner_sql, number_to_fetch=number_to_fetch)
else:
return "{select} TOP {number_to_fetch} {inner_sql}".format(
select=select, inner_sql=inner_sql, number_to_fetch=number_to_fetch)
def _fix_slicing_order(self, outer_fields, inner_select, order, inner_table_name):
"""
Apply any necessary fixes to the outer_fields, inner_select, and order
strings due to slicing.
"""
# Using ROW_NUMBER requires an ordering
if order is None:
meta = self.query.get_meta()
column = meta.pk.db_column or meta.pk.get_attname()
order = '{0}.{1} ASC'.format(
inner_table_name,
self.connection.ops.quote_name(column),
)
else:
alias_id = 0
# remap order for injected subselect
new_order = []
for x in order.split(','):
# find the ordering direction
m = _re_find_order_direction.search(x)
if m:
direction = m.groups()[0]
else:
direction = 'ASC'
# remove the ordering direction
x = _re_find_order_direction.sub('', x)
# remove any namespacing or table name from the column name
col = x.rsplit('.', 1)[-1]
# Is the ordering column missing from the inner select?
# 'inner_select' contains the full query without the leading 'SELECT '.
# It's possible that this can get a false hit if the ordering
# column is used in the WHERE while not being in the SELECT. It's
# not worth the complexity to properly handle that edge case.
if x not in inner_select:
# Ordering requires the column to be selected by the inner select
alias_id += 1
# alias column name
col = '{left_sql_quote}{0}___o{1}{right_sql_quote}'.format(
col.strip(self.connection.ops.left_sql_quote+self.connection.ops.right_sql_quote),
alias_id,
left_sql_quote=self.connection.ops.left_sql_quote,
right_sql_quote=self.connection.ops.right_sql_quote,
)
# add alias to inner_select
inner_select = '({0}) AS {1}, {2}'.format(x, col, inner_select)
new_order.append('{0}.{1} {2}'.format(inner_table_name, col, direction))
order = ', '.join(new_order)
return outer_fields, inner_select, order
def _alias_columns(self, sql):
"""Return tuple of SELECT and FROM clauses, aliasing duplicate column names."""
qn = self.connection.ops.quote_name
outer = list()
inner = list()
names_seen = list()
# replace all parens with placeholders
paren_depth, paren_buf = 0, ['']
parens, i = {}, 0
for ch in sql:
if ch == '(':
i += 1
paren_depth += 1
paren_buf.append('')
elif ch == ')':
paren_depth -= 1
key = '_placeholder_{0}'.format(i)
buf = paren_buf.pop()
# store the expanded paren string
buf = re.sub(r'%([^\(])', r'$$$\1', buf)
parens[key] = buf% parens
parens[key] = re.sub(r'\$\$\$([^\(])', r'%\1', parens[key])
#cannot use {} because IBM's DB2 uses {} as quotes
paren_buf[paren_depth] += '(%(' + key + ')s)'
else:
paren_buf[paren_depth] += ch
def _replace_sub(col):
"""Replace all placeholders with expanded values"""
while _re_col_placeholder.search(col):
col = col.format(**parens)
return col
temp_sql = ''.join(paren_buf)
# replace any bare %s with placeholders. Needed when the WHERE
# clause only contains one condition, and isn't wrapped in parens.
# the placeholder_data is used to prevent the variable "i" from
# being interpreted as a local variable in the replacement function
placeholder_data = { "i": i }
def _alias_placeholders(val):
i = placeholder_data["i"]
i += 1
placeholder_data["i"] = i
key = "_placeholder_{0}".format(i)
parens[key] = "%s"
return "%(" + key + ")s"
temp_sql = re.sub("%s", _alias_placeholders, temp_sql)
select_list, from_clause = _break(temp_sql, ' FROM ' + self.connection.ops.left_sql_quote)
for col in [x.strip() for x in select_list.split(',')]:
match = self._re_pat_col.search(col)
if match:
col_name = match.group(1)
col_key = col_name.lower()
if col_key in names_seen:
alias = qn('{0}___{1}'.format(col_name, names_seen.count(col_key)))
outer.append(alias)
inner.append('{0} as {1}'.format(_replace_sub(col), alias))
else:
outer.append(qn(col_name))
inner.append(_replace_sub(col))
names_seen.append(col_key)
else:
raise Exception('Unable to find a column name when parsing SQL: {0}'.format(col))
return ', '.join(outer), ', '.join(inner) + (from_clause % parens)
# ^^^^^^^^^^^^^^^^^^^^^
# We can't use `format` here, because `format` uses `{}` as special
# characters, but those happen to also be the quoting tokens for IBM's
# DB2
def get_ordering(self):
# The ORDER BY clause is invalid in views, inline functions,
# derived tables, subqueries, and common table expressions,
# unless TOP or FOR XML is also specified.
if getattr(self.query, '_mssql_ordering_not_allowed', False):
if django.VERSION[0] == 1 and django.VERSION[1] < 6:
return (None, [])
return (None, [], [])
return super(SQLCompiler, self).get_ordering()
|
lionheart/django-pyodbc | django_pyodbc/compiler.py | SQLCompiler._fix_slicing_order | python | def _fix_slicing_order(self, outer_fields, inner_select, order, inner_table_name):
# Using ROW_NUMBER requires an ordering
if order is None:
meta = self.query.get_meta()
column = meta.pk.db_column or meta.pk.get_attname()
order = '{0}.{1} ASC'.format(
inner_table_name,
self.connection.ops.quote_name(column),
)
else:
alias_id = 0
# remap order for injected subselect
new_order = []
for x in order.split(','):
# find the ordering direction
m = _re_find_order_direction.search(x)
if m:
direction = m.groups()[0]
else:
direction = 'ASC'
# remove the ordering direction
x = _re_find_order_direction.sub('', x)
# remove any namespacing or table name from the column name
col = x.rsplit('.', 1)[-1]
# Is the ordering column missing from the inner select?
# 'inner_select' contains the full query without the leading 'SELECT '.
# It's possible that this can get a false hit if the ordering
# column is used in the WHERE while not being in the SELECT. It's
# not worth the complexity to properly handle that edge case.
if x not in inner_select:
# Ordering requires the column to be selected by the inner select
alias_id += 1
# alias column name
col = '{left_sql_quote}{0}___o{1}{right_sql_quote}'.format(
col.strip(self.connection.ops.left_sql_quote+self.connection.ops.right_sql_quote),
alias_id,
left_sql_quote=self.connection.ops.left_sql_quote,
right_sql_quote=self.connection.ops.right_sql_quote,
)
# add alias to inner_select
inner_select = '({0}) AS {1}, {2}'.format(x, col, inner_select)
new_order.append('{0}.{1} {2}'.format(inner_table_name, col, direction))
order = ', '.join(new_order)
return outer_fields, inner_select, order | Apply any necessary fixes to the outer_fields, inner_select, and order
strings due to slicing. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/compiler.py#L346-L393 | null | class SQLCompiler(compiler.SQLCompiler):
def __init__(self,*args,**kwargs):
super(SQLCompiler,self).__init__(*args,**kwargs)
# Pattern to find the quoted column name at the end of a field
# specification
#
# E.g., if you're talking to MS SQL this regex would become
# \[([^\[]+)\]$
#
# This would match the underlined part of the following string:
# [foo_table][bar_column]
# ^^^^^^^^^^^^
self._re_pat_col = re.compile(
r"\{left_sql_quote}([^\{left_sql_quote}]+)\{right_sql_quote}$".format(
left_sql_quote=self.connection.ops.left_sql_quote,
right_sql_quote=self.connection.ops.right_sql_quote))
def compile(self, node, select_format=False):
if self.connection.ops.is_openedge and type(node) is where.WhereNode:
for val in node.children:
# If we too many more of these special cases we should probably move them to another file
if type(val.rhs) == date or type(val.lhs) == date:
setattr(val, 'as_microsoft', types.MethodType(where_date, val))
args = [node]
if select_format:
args.append(select_format)
return super(SQLCompiler, self).compile(*args)
def resolve_columns(self, row, fields=()):
# If the results are sliced, the resultset will have an initial
# "row number" column. Remove this column before the ORM sees it.
if getattr(self, '_using_row_number', False):
row = row[1:]
values = []
index_extra_select = len(self.query.extra_select)
for value, field in zip_longest(row[index_extra_select:], fields):
# print '\tfield=%s\tvalue=%s' % (repr(field), repr(value))
if field:
try:
value = self.connection.ops.convert_values(value, field)
except ValueError:
pass
values.append(value)
return row[:index_extra_select] + tuple(values)
def _fix_aggregates(self):
"""
MSSQL doesn't match the behavior of the other backends on a few of
the aggregate functions; different return type behavior, different
function names, etc.
MSSQL's implementation of AVG maintains datatype without proding. To
match behavior of other django backends, it needs to not drop remainders.
E.g. AVG([1, 2]) needs to yield 1.5, not 1
"""
try:
# for django 1.10 and up (works starting in 1.8 so I am told)
select = self.query.annotation_select
except AttributeError:
# older
select = self.query.aggregate_select
for alias, aggregate in select.items():
if not hasattr(aggregate, 'sql_function'):
continue
if aggregate.sql_function == 'AVG':# and self.connection.cast_avg_to_float:
# Embed the CAST in the template on this query to
# maintain multi-db support.
select[alias].sql_template = \
'%(function)s(CAST(%(field)s AS FLOAT))'
# translate StdDev function names
elif aggregate.sql_function == 'STDDEV_SAMP':
select[alias].sql_function = 'STDEV'
elif aggregate.sql_function == 'STDDEV_POP':
select[alias].sql_function = 'STDEVP'
# translate Variance function names
elif aggregate.sql_function == 'VAR_SAMP':
select[alias].sql_function = 'VAR'
elif aggregate.sql_function == 'VAR_POP':
select[alias].sql_function = 'VARP'
def as_sql(self, with_limits=True, with_col_aliases=False, qn=None, **kwargs):
self.pre_sql_setup()
# Django #12192 - Don't execute any DB query when QS slicing results in limit 0
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self._fix_aggregates()
self._using_row_number = False
# Get out of the way if we're not a select query or there's no limiting involved.
check_limits = with_limits and (self.query.low_mark or self.query.high_mark is not None)
if not check_limits:
# The ORDER BY clause is invalid in views, inline functions,
# derived tables, subqueries, and common table expressions,
# unless TOP or FOR XML is also specified.
try:
setattr(self.query, '_mssql_ordering_not_allowed', with_col_aliases)
result = super(SQLCompiler, self).as_sql(with_limits, with_col_aliases, **kwargs)
finally:
# remove in case query is every reused
delattr(self.query, '_mssql_ordering_not_allowed')
return result
raw_sql, fields = super(SQLCompiler, self).as_sql(False, with_col_aliases, **kwargs)
# Check for high mark only and replace with "TOP"
if self.query.high_mark is not None and not self.query.low_mark:
if self.connection.ops.is_db2:
sql = self._select_top('', raw_sql, self.query.high_mark)
else:
_select = 'SELECT'
if self.query.distinct:
_select += ' DISTINCT'
sql = re.sub(r'(?i)^{0}'.format(_select), '{0} TOP {1}'.format(_select, self.query.high_mark), raw_sql, 1)
return sql, fields
# Else we have limits; rewrite the query using ROW_NUMBER()
self._using_row_number = True
# Lop off ORDER... and the initial "SELECT"
inner_select = _remove_order_limit_offset(raw_sql)
outer_fields, inner_select = self._alias_columns(inner_select)
order = _get_order_limit_offset(raw_sql)[0]
qn = self.connection.ops.quote_name
inner_table_name = qn('AAAA')
outer_fields, inner_select, order = self._fix_slicing_order(outer_fields, inner_select, order, inner_table_name)
# map a copy of outer_fields for injected subselect
f = []
for x in outer_fields.split(','):
i = x.upper().find(' AS ')
if i != -1:
x = x[i+4:]
if x.find('.') != -1:
tbl, col = x.rsplit('.', 1)
else:
col = x
f.append('{0}.{1}'.format(inner_table_name, col.strip()))
# inject a subselect to get around OVER requiring ORDER BY to come from FROM
inner_select = '{fields} FROM ( SELECT {inner} ) AS {inner_as}'.format(
fields=', '.join(f),
inner=inner_select,
inner_as=inner_table_name,
)
# IBM's DB2 cannot have a prefix of `_` for column names
row_num_col = 'django_pyodbc_row_num' if self.connection.ops.is_db2 else '_row_num'
where_row_num = '{0} < {row_num_col}'.format(self.query.low_mark, row_num_col=row_num_col)
if self.query.high_mark:
where_row_num += ' and {row_num_col} <= {0}'.format(self.query.high_mark, row_num_col=row_num_col)
# SQL Server 2000 doesn't support the `ROW_NUMBER()` function, thus it
# is necessary to use the `TOP` construct with `ORDER BY` so we can
# slice out a particular range of results.
if self.connection.ops.sql_server_ver < 2005 and not self.connection.ops.is_db2:
num_to_select = self.query.high_mark - self.query.low_mark
order_by_col_with_prefix,order_direction = order.rsplit(' ',1)
order_by_col = order_by_col_with_prefix.rsplit('.',1)[-1]
opposite_order_direction = REV_ODIR[order_direction]
sql = r'''
SELECT
1, -- placeholder for _row_num
* FROM
(
SELECT TOP
-- num_to_select
{num_to_select}
*
FROM
(
SELECT TOP
-- high_mark
{high_mark}
-- inner
{inner}
ORDER BY (
-- order_by_col
{left_sql_quote}AAAA{right_sql_quote}.{order_by_col}
)
-- order_direction
{order_direction}
) AS BBBB ORDER BY ({left_sql_quote}BBBB{right_sql_quote}.{order_by_col}) {opposite_order_direction}
) AS QQQQ ORDER BY ({left_sql_quote}QQQQ{right_sql_quote}.{order_by_col}) {order_direction}
'''.format(
inner=inner_select,
num_to_select=num_to_select,
high_mark=self.query.high_mark,
order_by_col=order_by_col,
order_direction=order_direction,
opposite_order_direction=opposite_order_direction,
left_sql_quote=self.connection.ops.left_sql_quote,
right_sql_quote=self.connection.ops.right_sql_quote,
)
else:
sql = "SELECT {outer}, {row_num_col} FROM ( SELECT ROW_NUMBER() OVER ( ORDER BY {order}) as {row_num_col}, {inner}) as QQQ where {where}".format(
outer=outer_fields,
order=order,
inner=inner_select,
where=where_row_num,
row_num_col=row_num_col
)
return sql, fields
def _select_top(self,select,inner_sql,number_to_fetch):
if self.connection.ops.is_db2:
return "{select} {inner_sql} FETCH FIRST {number_to_fetch} ROWS ONLY".format(
select=select, inner_sql=inner_sql, number_to_fetch=number_to_fetch)
else:
return "{select} TOP {number_to_fetch} {inner_sql}".format(
select=select, inner_sql=inner_sql, number_to_fetch=number_to_fetch)
def _alias_columns(self, sql):
"""Return tuple of SELECT and FROM clauses, aliasing duplicate column names."""
qn = self.connection.ops.quote_name
outer = list()
inner = list()
names_seen = list()
# replace all parens with placeholders
paren_depth, paren_buf = 0, ['']
parens, i = {}, 0
for ch in sql:
if ch == '(':
i += 1
paren_depth += 1
paren_buf.append('')
elif ch == ')':
paren_depth -= 1
key = '_placeholder_{0}'.format(i)
buf = paren_buf.pop()
# store the expanded paren string
buf = re.sub(r'%([^\(])', r'$$$\1', buf)
parens[key] = buf% parens
parens[key] = re.sub(r'\$\$\$([^\(])', r'%\1', parens[key])
#cannot use {} because IBM's DB2 uses {} as quotes
paren_buf[paren_depth] += '(%(' + key + ')s)'
else:
paren_buf[paren_depth] += ch
def _replace_sub(col):
"""Replace all placeholders with expanded values"""
while _re_col_placeholder.search(col):
col = col.format(**parens)
return col
temp_sql = ''.join(paren_buf)
# replace any bare %s with placeholders. Needed when the WHERE
# clause only contains one condition, and isn't wrapped in parens.
# the placeholder_data is used to prevent the variable "i" from
# being interpreted as a local variable in the replacement function
placeholder_data = { "i": i }
def _alias_placeholders(val):
i = placeholder_data["i"]
i += 1
placeholder_data["i"] = i
key = "_placeholder_{0}".format(i)
parens[key] = "%s"
return "%(" + key + ")s"
temp_sql = re.sub("%s", _alias_placeholders, temp_sql)
select_list, from_clause = _break(temp_sql, ' FROM ' + self.connection.ops.left_sql_quote)
for col in [x.strip() for x in select_list.split(',')]:
match = self._re_pat_col.search(col)
if match:
col_name = match.group(1)
col_key = col_name.lower()
if col_key in names_seen:
alias = qn('{0}___{1}'.format(col_name, names_seen.count(col_key)))
outer.append(alias)
inner.append('{0} as {1}'.format(_replace_sub(col), alias))
else:
outer.append(qn(col_name))
inner.append(_replace_sub(col))
names_seen.append(col_key)
else:
raise Exception('Unable to find a column name when parsing SQL: {0}'.format(col))
return ', '.join(outer), ', '.join(inner) + (from_clause % parens)
# ^^^^^^^^^^^^^^^^^^^^^
# We can't use `format` here, because `format` uses `{}` as special
# characters, but those happen to also be the quoting tokens for IBM's
# DB2
def get_ordering(self):
# The ORDER BY clause is invalid in views, inline functions,
# derived tables, subqueries, and common table expressions,
# unless TOP or FOR XML is also specified.
if getattr(self.query, '_mssql_ordering_not_allowed', False):
if django.VERSION[0] == 1 and django.VERSION[1] < 6:
return (None, [])
return (None, [], [])
return super(SQLCompiler, self).get_ordering()
|
lionheart/django-pyodbc | django_pyodbc/compiler.py | SQLCompiler._alias_columns | python | def _alias_columns(self, sql):
qn = self.connection.ops.quote_name
outer = list()
inner = list()
names_seen = list()
# replace all parens with placeholders
paren_depth, paren_buf = 0, ['']
parens, i = {}, 0
for ch in sql:
if ch == '(':
i += 1
paren_depth += 1
paren_buf.append('')
elif ch == ')':
paren_depth -= 1
key = '_placeholder_{0}'.format(i)
buf = paren_buf.pop()
# store the expanded paren string
buf = re.sub(r'%([^\(])', r'$$$\1', buf)
parens[key] = buf% parens
parens[key] = re.sub(r'\$\$\$([^\(])', r'%\1', parens[key])
#cannot use {} because IBM's DB2 uses {} as quotes
paren_buf[paren_depth] += '(%(' + key + ')s)'
else:
paren_buf[paren_depth] += ch
def _replace_sub(col):
"""Replace all placeholders with expanded values"""
while _re_col_placeholder.search(col):
col = col.format(**parens)
return col
temp_sql = ''.join(paren_buf)
# replace any bare %s with placeholders. Needed when the WHERE
# clause only contains one condition, and isn't wrapped in parens.
# the placeholder_data is used to prevent the variable "i" from
# being interpreted as a local variable in the replacement function
placeholder_data = { "i": i }
def _alias_placeholders(val):
i = placeholder_data["i"]
i += 1
placeholder_data["i"] = i
key = "_placeholder_{0}".format(i)
parens[key] = "%s"
return "%(" + key + ")s"
temp_sql = re.sub("%s", _alias_placeholders, temp_sql)
select_list, from_clause = _break(temp_sql, ' FROM ' + self.connection.ops.left_sql_quote)
for col in [x.strip() for x in select_list.split(',')]:
match = self._re_pat_col.search(col)
if match:
col_name = match.group(1)
col_key = col_name.lower()
if col_key in names_seen:
alias = qn('{0}___{1}'.format(col_name, names_seen.count(col_key)))
outer.append(alias)
inner.append('{0} as {1}'.format(_replace_sub(col), alias))
else:
outer.append(qn(col_name))
inner.append(_replace_sub(col))
names_seen.append(col_key)
else:
raise Exception('Unable to find a column name when parsing SQL: {0}'.format(col))
return ', '.join(outer), ', '.join(inner) + (from_clause % parens) | Return tuple of SELECT and FROM clauses, aliasing duplicate column names. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/compiler.py#L395-L468 | null | class SQLCompiler(compiler.SQLCompiler):
def __init__(self,*args,**kwargs):
super(SQLCompiler,self).__init__(*args,**kwargs)
# Pattern to find the quoted column name at the end of a field
# specification
#
# E.g., if you're talking to MS SQL this regex would become
# \[([^\[]+)\]$
#
# This would match the underlined part of the following string:
# [foo_table][bar_column]
# ^^^^^^^^^^^^
self._re_pat_col = re.compile(
r"\{left_sql_quote}([^\{left_sql_quote}]+)\{right_sql_quote}$".format(
left_sql_quote=self.connection.ops.left_sql_quote,
right_sql_quote=self.connection.ops.right_sql_quote))
def compile(self, node, select_format=False):
if self.connection.ops.is_openedge and type(node) is where.WhereNode:
for val in node.children:
# If we too many more of these special cases we should probably move them to another file
if type(val.rhs) == date or type(val.lhs) == date:
setattr(val, 'as_microsoft', types.MethodType(where_date, val))
args = [node]
if select_format:
args.append(select_format)
return super(SQLCompiler, self).compile(*args)
def resolve_columns(self, row, fields=()):
# If the results are sliced, the resultset will have an initial
# "row number" column. Remove this column before the ORM sees it.
if getattr(self, '_using_row_number', False):
row = row[1:]
values = []
index_extra_select = len(self.query.extra_select)
for value, field in zip_longest(row[index_extra_select:], fields):
# print '\tfield=%s\tvalue=%s' % (repr(field), repr(value))
if field:
try:
value = self.connection.ops.convert_values(value, field)
except ValueError:
pass
values.append(value)
return row[:index_extra_select] + tuple(values)
def _fix_aggregates(self):
"""
MSSQL doesn't match the behavior of the other backends on a few of
the aggregate functions; different return type behavior, different
function names, etc.
MSSQL's implementation of AVG maintains datatype without proding. To
match behavior of other django backends, it needs to not drop remainders.
E.g. AVG([1, 2]) needs to yield 1.5, not 1
"""
try:
# for django 1.10 and up (works starting in 1.8 so I am told)
select = self.query.annotation_select
except AttributeError:
# older
select = self.query.aggregate_select
for alias, aggregate in select.items():
if not hasattr(aggregate, 'sql_function'):
continue
if aggregate.sql_function == 'AVG':# and self.connection.cast_avg_to_float:
# Embed the CAST in the template on this query to
# maintain multi-db support.
select[alias].sql_template = \
'%(function)s(CAST(%(field)s AS FLOAT))'
# translate StdDev function names
elif aggregate.sql_function == 'STDDEV_SAMP':
select[alias].sql_function = 'STDEV'
elif aggregate.sql_function == 'STDDEV_POP':
select[alias].sql_function = 'STDEVP'
# translate Variance function names
elif aggregate.sql_function == 'VAR_SAMP':
select[alias].sql_function = 'VAR'
elif aggregate.sql_function == 'VAR_POP':
select[alias].sql_function = 'VARP'
    def as_sql(self, with_limits=True, with_col_aliases=False, qn=None, **kwargs):
        """Build the SELECT statement, rewriting LIMIT/OFFSET slicing into
        T-SQL's TOP / ROW_NUMBER() constructs (or DB2's FETCH FIRST).

        :param with_limits: honor the query's low/high marks
        :param with_col_aliases: forwarded to the base compiler
        :param qn: unused on entry; rebound below to the backend quote_name
        :return: an (sql, params/fields) tuple as produced by the base compiler
        """
        self.pre_sql_setup()
        # Django #12192 - Don't execute any DB query when QS slicing results in limit 0
        if with_limits and self.query.low_mark == self.query.high_mark:
            return '', ()
        self._fix_aggregates()
        # Flag consumed later by resolve_columns() to strip the row-number column.
        self._using_row_number = False
        # Get out of the way if we're not a select query or there's no limiting involved.
        check_limits = with_limits and (self.query.low_mark or self.query.high_mark is not None)
        if not check_limits:
            # The ORDER BY clause is invalid in views, inline functions,
            # derived tables, subqueries, and common table expressions,
            # unless TOP or FOR XML is also specified.
            try:
                setattr(self.query, '_mssql_ordering_not_allowed', with_col_aliases)
                result = super(SQLCompiler, self).as_sql(with_limits, with_col_aliases, **kwargs)
            finally:
                # remove in case query is every reused
                delattr(self.query, '_mssql_ordering_not_allowed')
            return result
        raw_sql, fields = super(SQLCompiler, self).as_sql(False, with_col_aliases, **kwargs)
        # Check for high mark only and replace with "TOP"
        if self.query.high_mark is not None and not self.query.low_mark:
            if self.connection.ops.is_db2:
                sql = self._select_top('', raw_sql, self.query.high_mark)
            else:
                _select = 'SELECT'
                if self.query.distinct:
                    _select += ' DISTINCT'
                sql = re.sub(r'(?i)^{0}'.format(_select), '{0} TOP {1}'.format(_select, self.query.high_mark), raw_sql, 1)
            return sql, fields
        # Else we have limits; rewrite the query using ROW_NUMBER()
        self._using_row_number = True
        # Lop off ORDER... and the initial "SELECT"
        inner_select = _remove_order_limit_offset(raw_sql)
        outer_fields, inner_select = self._alias_columns(inner_select)
        order = _get_order_limit_offset(raw_sql)[0]
        qn = self.connection.ops.quote_name
        inner_table_name = qn('AAAA')
        outer_fields, inner_select, order = self._fix_slicing_order(outer_fields, inner_select, order, inner_table_name)
        # map a copy of outer_fields for injected subselect
        f = []
        for x in outer_fields.split(','):
            # Strip any "expr AS alias" prefix and table qualifier; keep only
            # the bare column name, requalified with the inner alias.
            i = x.upper().find(' AS ')
            if i != -1:
                x = x[i+4:]
            if x.find('.') != -1:
                tbl, col = x.rsplit('.', 1)
            else:
                col = x
            f.append('{0}.{1}'.format(inner_table_name, col.strip()))
        # inject a subselect to get around OVER requiring ORDER BY to come from FROM
        inner_select = '{fields} FROM ( SELECT {inner} ) AS {inner_as}'.format(
            fields=', '.join(f),
            inner=inner_select,
            inner_as=inner_table_name,
        )
        # IBM's DB2 cannot have a prefix of `_` for column names
        row_num_col = 'django_pyodbc_row_num' if self.connection.ops.is_db2 else '_row_num'
        where_row_num = '{0} < {row_num_col}'.format(self.query.low_mark, row_num_col=row_num_col)
        if self.query.high_mark:
            where_row_num += ' and {row_num_col} <= {0}'.format(self.query.high_mark, row_num_col=row_num_col)
        # SQL Server 2000 doesn't support the `ROW_NUMBER()` function, thus it
        # is necessary to use the `TOP` construct with `ORDER BY` so we can
        # slice out a particular range of results.
        if self.connection.ops.sql_server_ver < 2005 and not self.connection.ops.is_db2:
            num_to_select = self.query.high_mark - self.query.low_mark
            order_by_col_with_prefix,order_direction = order.rsplit(' ',1)
            order_by_col = order_by_col_with_prefix.rsplit('.',1)[-1]
            opposite_order_direction = REV_ODIR[order_direction]
            # Emulate the slice with nested TOP/ORDER BY: take the first
            # high_mark rows, reverse-order and keep the trailing
            # (high-low) of them, then restore the requested ordering.
            sql = r'''
            SELECT
              1, -- placeholder for _row_num
              * FROM
            (
                SELECT TOP
                -- num_to_select
                {num_to_select}
                *
                FROM
                (
                    SELECT TOP
                    -- high_mark
                    {high_mark}
                    -- inner
                    {inner}
                    ORDER BY (
                        -- order_by_col
                        {left_sql_quote}AAAA{right_sql_quote}.{order_by_col}
                    )
                    -- order_direction
                    {order_direction}
                ) AS BBBB ORDER BY ({left_sql_quote}BBBB{right_sql_quote}.{order_by_col}) {opposite_order_direction}
            ) AS QQQQ ORDER BY ({left_sql_quote}QQQQ{right_sql_quote}.{order_by_col}) {order_direction}
            '''.format(
                inner=inner_select,
                num_to_select=num_to_select,
                high_mark=self.query.high_mark,
                order_by_col=order_by_col,
                order_direction=order_direction,
                opposite_order_direction=opposite_order_direction,
                left_sql_quote=self.connection.ops.left_sql_quote,
                right_sql_quote=self.connection.ops.right_sql_quote,
            )
        else:
            sql = "SELECT {outer}, {row_num_col} FROM ( SELECT ROW_NUMBER() OVER ( ORDER BY {order}) as {row_num_col}, {inner}) as QQQ where {where}".format(
                outer=outer_fields,
                order=order,
                inner=inner_select,
                where=where_row_num,
                row_num_col=row_num_col
            )
        return sql, fields
def _select_top(self,select,inner_sql,number_to_fetch):
if self.connection.ops.is_db2:
return "{select} {inner_sql} FETCH FIRST {number_to_fetch} ROWS ONLY".format(
select=select, inner_sql=inner_sql, number_to_fetch=number_to_fetch)
else:
return "{select} TOP {number_to_fetch} {inner_sql}".format(
select=select, inner_sql=inner_sql, number_to_fetch=number_to_fetch)
    def _fix_slicing_order(self, outer_fields, inner_select, order, inner_table_name):
        """
        Apply any necessary fixes to the outer_fields, inner_select, and order
        strings due to slicing.

        ROW_NUMBER() requires an ORDER BY, so a default primary-key ordering
        is synthesized when none exists; otherwise every ordering column is
        re-qualified with *inner_table_name* (aliased into the inner select
        when it is not already projected).

        :return: the (outer_fields, inner_select, order) triple, adjusted
        """
        # Using ROW_NUMBER requires an ordering
        if order is None:
            meta = self.query.get_meta()
            column = meta.pk.db_column or meta.pk.get_attname()
            order = '{0}.{1} ASC'.format(
                inner_table_name,
                self.connection.ops.quote_name(column),
            )
        else:
            alias_id = 0
            # remap order for injected subselect
            new_order = []
            for x in order.split(','):
                # find the ordering direction
                m = _re_find_order_direction.search(x)
                if m:
                    direction = m.groups()[0]
                else:
                    direction = 'ASC'
                # remove the ordering direction
                x = _re_find_order_direction.sub('', x)
                # remove any namespacing or table name from the column name
                col = x.rsplit('.', 1)[-1]
                # Is the ordering column missing from the inner select?
                # 'inner_select' contains the full query without the leading 'SELECT '.
                # It's possible that this can get a false hit if the ordering
                # column is used in the WHERE while not being in the WHERE. It's
                # not worth the complexity to properly handle that edge case.
                if x not in inner_select:
                    # Ordering requires the column to be selected by the inner select
                    alias_id += 1
                    # alias column name
                    col = '{left_sql_quote}{0}___o{1}{right_sql_quote}'.format(
                        col.strip(self.connection.ops.left_sql_quote+self.connection.ops.right_sql_quote),
                        alias_id,
                        left_sql_quote=self.connection.ops.left_sql_quote,
                        right_sql_quote=self.connection.ops.right_sql_quote,
                    )
                    # add alias to inner_select
                    inner_select = '({0}) AS {1}, {2}'.format(x, col, inner_select)
                new_order.append('{0}.{1} {2}'.format(inner_table_name, col, direction))
            order = ', '.join(new_order)
        return outer_fields, inner_select, order
# ^^^^^^^^^^^^^^^^^^^^^
# We can't use `format` here, because `format` uses `{}` as special
# characters, but those happen to also be the quoting tokens for IBM's
# DB2
def get_ordering(self):
# The ORDER BY clause is invalid in views, inline functions,
# derived tables, subqueries, and common table expressions,
# unless TOP or FOR XML is also specified.
if getattr(self.query, '_mssql_ordering_not_allowed', False):
if django.VERSION[0] == 1 and django.VERSION[1] < 6:
return (None, [])
return (None, [], [])
return super(SQLCompiler, self).get_ordering()
|
lionheart/django-pyodbc | django_pyodbc/compiler.py | SQLInsertCompiler._fix_insert | python | def _fix_insert(self, sql, params):
meta = self.query.get_meta()
if meta.has_auto_field:
if hasattr(self.query, 'fields'):
# django 1.4 replaced columns with fields
fields = self.query.fields
auto_field = meta.auto_field
else:
# < django 1.4
fields = self.query.columns
auto_field = meta.auto_field.db_column or meta.auto_field.column
auto_in_fields = auto_field in fields
quoted_table = self.connection.ops.quote_name(meta.db_table)
if not fields or (auto_in_fields and len(fields) == 1 and not params):
# convert format when inserting only the primary key without
# specifying a value
sql = 'INSERT INTO {0} DEFAULT VALUES'.format(
quoted_table
)
params = []
elif auto_in_fields:
# wrap with identity insert
sql = 'SET IDENTITY_INSERT {table} ON;{sql};SET IDENTITY_INSERT {table} OFF'.format(
table=quoted_table,
sql=sql,
)
# mangle SQL to return ID from insert
# http://msdn.microsoft.com/en-us/library/ms177564.aspx
if self.return_id and self.connection.features.can_return_id_from_insert:
col = self.connection.ops.quote_name(meta.pk.db_column or meta.pk.get_attname())
# Determine datatype for use with the table variable that will return the inserted ID
pk_db_type = _re_data_type_terminator.split(meta.pk.db_type(self.connection))[0]
# NOCOUNT ON to prevent additional trigger/stored proc related resultsets
sql = 'SET NOCOUNT ON;{declare_table_var};{sql};{select_return_id}'.format(
sql=sql,
declare_table_var="DECLARE @sqlserver_ado_return_id table ({col_name} {pk_type})".format(
col_name=col,
pk_type=pk_db_type,
),
select_return_id="SELECT * FROM @sqlserver_ado_return_id",
)
output = self._values_repl.format(col=col)
sql = self._re_values_sub.sub(output, sql)
return sql, params | Wrap the passed SQL with IDENTITY_INSERT statements and apply
other necessary fixes. | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/compiler.py#L506-L561 | null | class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
# search for after table/column list
_re_values_sub = re.compile(r'(?P<prefix>\)|\])(?P<default>\s*|\s*default\s*)values(?P<suffix>\s*|\s+\()?', re.IGNORECASE)
# ... and insert the OUTPUT clause between it and the values list (or DEFAULT VALUES).
_values_repl = r'\g<prefix> OUTPUT INSERTED.{col} INTO @sqlserver_ado_return_id\g<default>VALUES\g<suffix>'
def as_sql(self, *args, **kwargs):
# Fix for Django ticket #14019
if not hasattr(self, 'return_id'):
self.return_id = False
result = super(SQLInsertCompiler, self).as_sql(*args, **kwargs)
if isinstance(result, list):
# Django 1.4 wraps return in list
return [self._fix_insert(x[0], x[1]) for x in result]
sql, params = result
return self._fix_insert(sql, params)
|
lionheart/django-pyodbc | django_pyodbc/base.py | CursorWrapper.format_results | python | def format_results(self, rows):
needs_utc = _DJANGO_VERSION >= 14 and settings.USE_TZ
if not (needs_utc or not self.driver_supports_utf8):
return tuple(rows)
# FreeTDS (and other ODBC drivers?) don't support Unicode yet, so we
# need to decode UTF-8 data coming from the DB
fr = []
for row in rows:
if not self.driver_supports_utf8 and isinstance(row, binary_type):
row = row.decode(self.encoding)
elif needs_utc and isinstance(row, datetime.datetime):
row = row.replace(tzinfo=timezone.utc)
fr.append(row)
return tuple(fr) | Decode data coming from the database if needed and convert rows to tuples
(pyodbc Rows are not sliceable). | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/base.py#L513-L531 | null | class CursorWrapper(object):
"""
A wrapper around the pyodbc's cursor that takes in account a) some pyodbc
DB-API 2.0 implementation and b) some common ODBC driver particularities.
"""
    def __init__(self, cursor, driver_supports_utf8, encoding="", db_wrpr=None):
        """Wrap a raw pyodbc cursor.

        :param cursor: the underlying pyodbc cursor
        :param driver_supports_utf8: whether byte strings can be passed
            through unchanged (older FreeTDS drivers could not take UTF-8)
        :param encoding: encoding used to decode byte-string params/results
        :param db_wrpr: the owning database wrapper, if any
        """
        self.cursor = cursor
        self.driver_supports_utf8 = driver_supports_utf8
        # Most recent statement/parameters, kept for debugging purposes.
        self.last_sql = ''
        self.last_params = ()
        self.encoding = encoding
        self.db_wrpr = db_wrpr
    def close(self):
        """Close the underlying cursor, ignoring double-close errors."""
        try:
            self.cursor.close()
        except Database.ProgrammingError:
            # pyodbc raises ProgrammingError when the cursor was already
            # closed; closing twice is harmless, so swallow it.
            pass
def format_sql(self, sql, n_params=None):
# pyodbc uses '?' instead of '%s' as parameter placeholder.
if n_params is not None:
try:
sql = sql % tuple('?' * n_params)
except Exception as e:
#Todo checkout whats happening here
pass
else:
if '%s' in sql:
sql = sql.replace('%s', '?')
return sql
def format_params(self, params):
fp = []
for p in params:
if isinstance(p, text_type):
fp.append(p)
elif isinstance(p, binary_type):
if not self.driver_supports_utf8:
fp.append(p.decode(self.encoding))
else:
fp.append(p)
elif isinstance(p, type(True)):
if p:
fp.append(1)
else:
fp.append(0)
else:
fp.append(p)
return tuple(fp)
def execute(self, sql, params=()):
self.last_sql = sql
#django-debug toolbar error
if params == None:
params = ()
sql = self.format_sql(sql, len(params))
params = self.format_params(params)
self.last_params = params
try:
return self.cursor.execute(sql, params)
except IntegrityError:
e = sys.exc_info()[1]
raise utils.IntegrityError(*e.args)
except DatabaseError:
e = sys.exc_info()[1]
raise utils.DatabaseError(*e.args)
    def executemany(self, sql, params_list):
        """Execute *sql* once per parameter tuple in *params_list*,
        translating placeholders and wrapping database errors.
        """
        sql = self.format_sql(sql)
        # pyodbc's cursor.executemany() doesn't support an empty param_list
        if not params_list:
            if '?' in sql:
                return
        # NOTE(review): an empty params_list with no '?' still falls through
        # to cursor.executemany(sql, []) below -- confirm the driver accepts
        # that, given the comment above.
        else:
            raw_pll = params_list
            params_list = [self.format_params(p) for p in raw_pll]
        try:
            return self.cursor.executemany(sql, params_list)
        except IntegrityError:
            e = sys.exc_info()[1]
            raise utils.IntegrityError(*e.args)
        except DatabaseError:
            e = sys.exc_info()[1]
            raise utils.DatabaseError(*e.args)
def fetchone(self):
row = self.cursor.fetchone()
if row is not None:
return self.format_results(row)
return []
def fetchmany(self, chunk):
return [self.format_results(row) for row in self.cursor.fetchmany(chunk)]
def fetchall(self):
return [self.format_results(row) for row in self.cursor.fetchall()]
    def __getattr__(self, attr):
        """Delegate unknown attribute access to the wrapped pyodbc cursor."""
        if attr in self.__dict__:
            return self.__dict__[attr]
        return getattr(self.cursor, attr)
    def __iter__(self):
        # Iterates raw rows straight off the underlying cursor; note that
        # this path does NOT run rows through format_results().
        return iter(self.cursor)
    def __enter__(self):
        # Context-manager support: `with wrapper as c: ...`
        return self
    def __exit__(self, type, value, traceback):
        # Returning False never suppresses exceptions from the with-block.
        return False
# # MS SQL Server doesn't support explicit savepoint commits; savepoints are
# # implicitly committed with the transaction.
# # Ignore them.
def savepoint_commit(self, sid):
# if something is populating self.queries, include a fake entry to avoid
# issues with tests that use assertNumQueries.
if self.queries:
self.queries.append({
'sql': '-- RELEASE SAVEPOINT %s -- (because assertNumQueries)' % self.ops.quote_name(sid),
'time': '0.000',
})
|
fracpete/python-weka-wrapper | python/weka/core/tokenizers.py | TokenIterator.next | python | def next(self):
if not self.__has_more():
raise StopIteration()
else:
return javabridge.get_env().get_string(self.__next()) | Reads the next dataset row.
:return: the next string token
:rtype: Instance | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/tokenizers.py#L42-L52 | null | class TokenIterator(object):
"""
Iterator for string tokens.
"""
    def __init__(self, tokenizer):
        """
        Initializes the iterator.

        :param tokenizer: the tokenizer instance to use
        :type tokenizer: Tokenizer
        """
        self.tokenizer = tokenizer
        # Pre-bind the Java methods once so each iteration step avoids a
        # reflective method lookup through javabridge.
        self.__has_more = javabridge.make_call(self.tokenizer.jobject, "hasMoreElements", "()Z")
        self.__next = javabridge.make_call(self.tokenizer.jobject, "nextElement", "()Ljava/lang/String;")
    def __iter__(self):
        """
        Returns itself, as required by the iterator protocol.
        """
        return self
|
fracpete/python-weka-wrapper | python/weka/associations.py | main | python | def main():
parser = argparse.ArgumentParser(
description='Executes an associator from the command-line. Calls JVM start/stop automatically.')
parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
parser.add_argument("-t", metavar="train", dest="train", required=True, help="training set file")
parser.add_argument("associator", help="associator classname, e.g., weka.associations.Apriori")
parser.add_argument("option", nargs=argparse.REMAINDER, help="additional associator options")
parsed = parser.parse_args()
jars = []
if parsed.classpath is not None:
jars = parsed.classpath.split(os.pathsep)
jvm.start(jars, max_heap_size=parsed.heap, packages=True)
logger.debug("Commandline: " + join_options(sys.argv[1:]))
try:
associator = Associator(classname=parsed.associator)
if len(parsed.option) > 0:
associator.options = parsed.option
loader = converters.loader_for_file(parsed.train)
data = loader.load_file(parsed.train)
associator.build_associations(data)
print(str(associator))
except Exception, e:
print(e)
finally:
jvm.stop() | Runs a associator from the command-line. Calls JVM start/stop automatically.
Use -h to see all options. | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/associations.py#L605-L638 | null | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# associations.py
# Copyright (C) 2014-2018 Fracpete (pythonwekawrapper at gmail dot com)
import javabridge
import logging
import os
import sys
import argparse
import weka.core.jvm as jvm
import weka.core.converters as converters
from weka.core.classes import OptionHandler, JavaObject, join_options
from weka.core.capabilities import Capabilities
from weka.core.dataset import Attribute
from weka.core.types import string_array_to_list
# logging setup
logger = logging.getLogger("weka.associations")
class Item(JavaObject):
    """
    Wrapper for weka.associations.Item class.
    """

    def __init__(self, jobject):
        """
        Initializes the wrapper.

        :param jobject: the Item object to wrap
        :type jobject: JB_Object
        """
        if jobject is None:
            raise Exception("No Item JB_Object provided!")
        self.enforce_type(jobject, "weka.associations.Item")
        super(Item, self).__init__(jobject)

    def __compareto__(self, other):
        """
        Return the comparison result with the other item.

        :param other: the other item to compare with
        :type other: Item
        :return: the integer result, <0 is less than, =0 is equal to, >0 is greater than
        :rtype: int
        """
        return javabridge.call(self.jobject, "compareTo", "(Lweka/associations/Item;)I", other.jobject)

    def __lt__(self, other):
        """
        Checks self < other.

        :param other: the item to compare with
        :type other: Item
        :return: whether less than the other item
        :rtype: bool
        """
        return self.__compareto__(other) < 0

    def __le__(self, other):
        """
        Checks self <= other.

        :param other: the item to compare with
        :type other: Item
        :return: whether less or equal than the other item
        :rtype: bool
        """
        return self.__compareto__(other) <= 0

    def decrease_frequency(self, frequency=None):
        """
        Decreases the frequency.

        :param frequency: the frequency to decrease by, 1 if None
        :type frequency: int
        """
        if frequency is None:
            javabridge.call(self.jobject, "decreaseFrequency", "()V")
        else:
            javabridge.call(self.jobject, "decreaseFrequency", "(I)V", frequency)

    def increase_frequency(self, frequency=None):
        """
        Increases the frequency.

        :param frequency: the frequency to increase by, 1 if None
        :type frequency: int
        """
        if frequency is None:
            javabridge.call(self.jobject, "increaseFrequency", "()V")
        else:
            javabridge.call(self.jobject, "increaseFrequency", "(I)V", frequency)

    def __eq__(self, other):
        """
        Returns whether this object is the same as the other one.

        :param other: the object to compare with
        :type other: Item
        :return: True if the same
        :rtype: bool
        """
        return javabridge.call(self.jobject, "equals", "(Ljava/lang/Object;)Z", other.jobject)

    def __ge__(self, other):
        """
        Checks self >= other.

        :param other: the item to compare with
        :type other: Item
        :return: whether greater or equal than the other item
        :rtype: bool
        """
        return self.__compareto__(other) >= 0

    def __gt__(self, other):
        """
        Checks self > other.

        :param other: the item to compare with
        :type other: Item
        :return: whether greater than the other item
        :rtype: bool
        """
        return self.__compareto__(other) > 0

    def __str__(self):
        """
        Returns the item as string.

        :return: the item as string
        :rtype: str
        """
        return javabridge.call(self.jobject, "toString", "()Ljava/lang/String;")

    @property
    def frequency(self):
        """
        Returns the frequency.

        :return: the frequency
        :rtype: int
        """
        return javabridge.call(self.jobject, "getFrequency", "()I")

    @property
    def attribute(self):
        """
        Returns the attribute.

        :return: the attribute
        :rtype: Attribute
        """
        return Attribute(javabridge.call(self.jobject, "getAttribute", "()Lweka/core/Attribute;"))

    @property
    def comparison(self):
        """
        Returns the comparison operator as string.

        :return: the comparison operator
        :rtype: str
        """
        # Bug fix: the Java call already yields a Python string; wrapping it
        # in Attribute() (a copy-paste from the `attribute` property) would
        # hand a str where Attribute expects a JB_Object.
        return javabridge.call(self.jobject, "getComparisonAsString", "()Ljava/lang/String;")

    @property
    def item_value(self):
        """
        Returns the item value as string.

        :return: the item value
        :rtype: str
        """
        # Bug fix: return the string directly instead of wrapping it in an
        # Attribute object (see `comparison`).
        return javabridge.call(self.jobject, "getItemValueAsString", "()Ljava/lang/String;")
class AssociationRule(JavaObject):
    """
    Wrapper for weka.associations.AssociationRule class.
    """
    def __init__(self, jobject):
        """
        Initializes the wrapper.

        :param jobject: the AssociationRule object to wrap
        :type jobject: JB_Object
        """
        if jobject is None:
            raise Exception("No AssociationRule JB_Object provided!")
        self.enforce_type(jobject, "weka.associations.AssociationRule")
        super(AssociationRule, self).__init__(jobject)
    def __compareto__(self, other):
        """
        Return the comparison result with the other rule.

        :param other: the other rule to compare with
        :type other: AssociationRule
        :return: the integer result, <0 is less than, =0 is equal to, >0 is greater than
        :rtype: int
        """
        return javabridge.call(self.jobject, "compareTo", "(Lweka/associations/AssociationRule;)I", other.jobject)
    def __lt__(self, other):
        """
        Checks self < other.

        :param other: the rule to compare with
        :type other: AssociationRule
        :return: whether less than the other rule
        :rtype: bool
        """
        return self.__compareto__(other) < 0
    def __le__(self, other):
        """
        Checks self <= other.

        :param other: the rule to compare with
        :type other: AssociationRule
        :return: whether less or equal than the other rule
        :rtype: bool
        """
        return self.__compareto__(other) <= 0
    # NOTE(review): __eq__ is defined without a matching __ne__/__hash__;
    # under Python 2 `!=` therefore falls back to identity -- confirm that
    # is intended before relying on inequality checks.
    def __eq__(self, other):
        """
        Returns whether this object is the same as the other one.

        :param other: the object to compare with
        :type other: AssociationRule
        :return: True if the same
        :rtype: bool
        """
        return javabridge.call(self.jobject, "equals", "(Ljava/lang/Object;)Z", other.jobject)
    def __ge__(self, other):
        """
        Checks self >= other.

        :param other: the rule to compare with
        :type other: AssociationRule
        :return: whether greater or equal than the other rule
        :rtype: bool
        """
        return self.__compareto__(other) >= 0
    def __gt__(self, other):
        """
        Checks self > other.

        :param other: the rule to compare with
        :type other: AssociationRule
        :return: whether greater than the other rule
        :rtype: bool
        """
        return self.__compareto__(other) > 0
    @property
    def consequence(self):
        """
        Get the consequence.

        :return: the consequence, list of Item objects
        :rtype: list
        """
        items = javabridge.get_collection_wrapper(
            javabridge.call(self.jobject, "getConsequence", "()Ljava/util/Collection;"))
        result = []
        for item in items:
            result.append(Item(item))
        return result
    @property
    def consequence_support(self):
        """
        Get the support for the consequence.

        :return: the support
        :rtype: int
        """
        return javabridge.call(self.jobject, "getConsequenceSupport", "()I")
    @property
    def premise(self):
        """
        Get the premise.

        :return: the premise, list of Item objects
        :rtype: list
        """
        items = javabridge.get_collection_wrapper(
            javabridge.call(self.jobject, "getPremise", "()Ljava/util/Collection;"))
        result = []
        for item in items:
            result.append(Item(item))
        return result
    @property
    def premise_support(self):
        """
        Get the support for the premise.

        :return: the support
        :rtype: int
        """
        return javabridge.call(self.jobject, "getPremiseSupport", "()I")
    @property
    def total_support(self):
        """
        Get the total support.

        :return: the support
        :rtype: int
        """
        return javabridge.call(self.jobject, "getTotalSupport", "()I")
    @property
    def total_transactions(self):
        """
        Get the total transactions.

        :return: the transactions
        :rtype: int
        """
        return javabridge.call(self.jobject, "getTotalTransactions", "()I")
    @property
    def metric_names(self):
        """
        Returns the metric names for the rule.

        :return: the metric names
        :rtype: list
        """
        return string_array_to_list(javabridge.call(self.jobject, "getMetricNamesForRule", "()[Ljava/lang/String;"))
    @property
    def metric_values(self):
        """
        Returns the metric values for the rule.

        :return: the metric values
        :rtype: ndarray
        """
        # The Java double[] is materialized into a numpy array by javabridge.
        return javabridge.get_env().get_double_array_elements(
            javabridge.call(self.jobject, "getMetricValuesForRule", "()[D"))
    def metric_value(self, name):
        """
        Returns the named metric value for the rule.

        :param name: the name of the metric
        :type name: str
        :return: the metric value
        :rtype: float
        """
        return javabridge.call(self.jobject, "getNamedMetricValue", "(Ljava/lang/String;)D", name)
    @property
    def primary_metric_name(self):
        """
        Returns the primary metric name for the rule.

        :return: the metric name
        :rtype: str
        """
        return javabridge.call(self.jobject, "getPrimaryMetricName", "()Ljava/lang/String;")
    @property
    def primary_metric_value(self):
        """
        Returns the primary metric value for the rule.

        :return: the metric value
        :rtype: float
        """
        return javabridge.call(self.jobject, "getPrimaryMetricValue", "()D")
class AssociationRulesIterator(object):
"""
Iterator for weka.associations.AssociationRules class.
"""
def __init__(self, rules):
"""
Initializes with the rules.
:param rules: the rules to use
:type rules: AssociationRules
"""
self.rules = rules
self.index = 0
self.length = len(rules)
def __iter__(self):
"""
Returns itself.
"""
return self
def next(self):
"""
Returns the next rule.
:return: the next rule object
:rtype: AssociationRule
"""
if self.index < self.length:
index = self.index
self.index += 1
return self.rules[index]
else:
raise StopIteration()
class AssociationRules(JavaObject):
    """
    Wrapper for weka.associations.AssociationRules class.
    """
    def __init__(self, jobject):
        """
        Initializes the wrapper.

        :param jobject: the AssociationRules object to wrap
        :type jobject: JB_Object
        """
        if jobject is None:
            raise Exception("No AssociationRules JB_Object provided!")
        self.enforce_type(jobject, "weka.associations.AssociationRules")
        super(AssociationRules, self).__init__(jobject)
    def __len__(self):
        """
        Returns the number of rules available.

        :return: the number of rules
        :rtype: int
        """
        return javabridge.call(self.jobject, "getNumRules", "()I")
    def __getitem__(self, item):
        """
        Returns the specified item, using 0-based indices.

        :param item: the 0-based index
        :type item: int
        :return: the AssociationRule
        :rtype: AssociationRule
        """
        # Note: the backing Java list is re-fetched on every access.
        rules = javabridge.get_collection_wrapper(
            javabridge.call(self.jobject, "getRules", "()Ljava/util/List;"))
        return AssociationRule(rules[item])
    def __str__(self):
        """
        Returns a short description of the rules.

        :return: short description
        :rtype: str
        """
        return str(len(self)) + " rules generated by " + self.producer
    def __iter__(self):
        """
        Returns an iterator for the rules.

        :return: the iterator
        :rtype: AssociationRulesIterator
        """
        return AssociationRulesIterator(self)
    @property
    def producer(self):
        """
        Returns a string describing the producer that generated these rules.

        :return: the producer
        :rtype: str
        """
        return javabridge.call(self.jobject, "getProducer", "()Ljava/lang/String;")
    @producer.setter
    def producer(self, producer):
        """
        Sets the string describing the producer that generated these rules.

        :param producer: the producer
        :type producer: str
        """
        javabridge.call(self.jobject, "setProducer", "(Ljava/lang/String;)V", producer)
class Associator(OptionHandler):
    """
    Wrapper class for associators.
    """
    def __init__(self, classname=None, jobject=None, options=None):
        """
        Initializes the specified associator using either the classname or the supplied JB_Object.

        :param classname: the classname of the associator
        :type classname: str
        :param jobject: the JB_Object to use
        :type jobject: JB_Object
        :param options: the list of commandline options to set
        :type options: list
        """
        if jobject is None:
            jobject = Associator.new_instance(classname)
        self.enforce_type(jobject, "weka.associations.Associator")
        super(Associator, self).__init__(jobject=jobject, options=options)
    @property
    def capabilities(self):
        """
        Returns the capabilities of the associator.

        :return: the capabilities
        :rtype: Capabilities
        """
        return Capabilities(javabridge.call(self.jobject, "getCapabilities", "()Lweka/core/Capabilities;"))
    def build_associations(self, data):
        """
        Builds the associator with the data.

        :param data: the data to train the associator with
        :type data: Instances
        """
        javabridge.call(self.jobject, "buildAssociations", "(Lweka/core/Instances;)V", data.jobject)
    def can_produce_rules(self):
        """
        Checks whether association rules can be generated.

        :return: whether scheme implements AssociationRulesProducer interface and
                 association rules can be generated
        :rtype: bool
        """
        if not self.check_type(self.jobject, "weka.associations.AssociationRulesProducer"):
            return False
        return javabridge.call(self.jobject, "canProduceRules", "()Z")
    def association_rules(self):
        """
        Returns association rules that were generated. Only if implements AssociationRulesProducer.

        :return: the association rules that were generated, None otherwise
        :rtype: AssociationRules
        """
        if not self.check_type(self.jobject, "weka.associations.AssociationRulesProducer"):
            return None
        return AssociationRules(
            javabridge.call(self.jobject, "getAssociationRules", "()Lweka/associations/AssociationRules;"))
    @property
    def rule_metric_names(self):
        """
        Returns the rule metric names of the association rules. Only if implements AssociationRulesProducer.

        :return: the metric names, None otherwise
        :rtype: list
        """
        if not self.check_type(self.jobject, "weka.associations.AssociationRulesProducer"):
            return None
        return string_array_to_list(
            javabridge.call(self.jobject, "getRuleMetricNames", "()[Ljava/lang/String;"))
    @classmethod
    def make_copy(cls, associator):
        """
        Creates a copy of the associator.

        :param associator: the associator to copy
        :type associator: Associator
        :return: the copy of the associator
        :rtype: Associator
        """
        return Associator(
            jobject=javabridge.static_call(
                "weka/associations/AbstractAssociator", "makeCopy",
                "(Lweka/associations/Associator;)Lweka/associations/Associator;", associator.jobject))
# Command-line entry point; note the Python 2 `except <Exc>, <name>`
# syntax used throughout this module.
if __name__ == "__main__":
    try:
        main()
    except Exception, ex:
        print(ex)
|
fracpete/python-weka-wrapper | python/weka/associations.py | AssociationRulesIterator.next | python | def next(self):
if self.index < self.length:
index = self.index
self.index += 1
return self.rules[index]
else:
raise StopIteration() | Returns the next rule.
:return: the next rule object
:rtype: AssociationRule | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/associations.py#L419-L431 | null | class AssociationRulesIterator(object):
"""
Iterator for weka.associations.AssociationRules class.
"""
    def __init__(self, rules):
        """
        Initializes with the rules.

        :param rules: the rules to use
        :type rules: AssociationRules
        """
        self.rules = rules
        self.index = 0  # next 0-based position to yield
        self.length = len(rules)  # cached so exhaustion checks are O(1)
    def __iter__(self):
        """
        Returns itself, as per the iterator protocol.
        """
        return self
|
fracpete/python-weka-wrapper | python/weka/flow/control.py | ActorHandler._build_tree | python | def _build_tree(self, actor, content):
depth = actor.depth
row = ""
for i in xrange(depth - 1):
row += "| "
if depth > 0:
row += "|-"
name = actor.name
if name != actor.__class__.__name__:
name = actor.__class__.__name__ + " '" + name + "'"
row += name
quickinfo = actor.quickinfo
if quickinfo is not None:
row += " [" + quickinfo + "]"
content.append(row)
if isinstance(actor, ActorHandler):
for sub in actor.actors:
self._build_tree(sub, content) | Builds the tree for the given actor.
:param actor: the actor to process
:type actor: Actor
:param content: the rows of the tree collected so far
:type content: list | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/flow/control.py#L284-L310 | null | class ActorHandler(Actor):
"""
The ancestor for all actors that handle other actors.
"""
def __init__(self, name=None, config=None):
    """
    Initializes the actor handler.

    :param name: the name of the actor handler
    :type name: str
    :param config: the dictionary with the options (str -> object).
    :type config: dict
    """
    super(ActorHandler, self).__init__(name=name, config=config)
    # Director that drives execution of the managed sub-actors;
    # created after super().__init__ so the config is already in place.
    self._director = self.new_director()
    # Register the de-JSONification handler once per process.
    if not classes.has_dict_handler("ActorHandler"):
        classes.register_dict_handler("ActorHandler", ActorHandler.from_dict)
def new_director(self):
"""
Creates the director to use for handling the sub-actors.
:return: the director instance
:rtype: Director
"""
raise Exception("Not implemented!")
def default_actors(self):
"""
Returns the default actors to use.
:return: the default actors, if any
:rtype: list
"""
return []
def check_actors(self, actors):
"""
Performs checks on the actors that are to be used. Raises an exception if invalid setup.
:param actors: the actors to check
:type actors: list
"""
pass
def fix_config(self, options):
    """
    Completes the option dictionary, filling in any missing required entries.

    :param options: the options to fix
    :type options: dict
    :return: the (potentially) fixed options
    :rtype: dict
    """
    options = super(ActorHandler, self).fix_config(options)
    key = "actors"
    # only compute the default when the entry is actually missing
    if key not in options:
        options[key] = self.default_actors()
    self.help.setdefault(key, "The list of sub-actors that this actor manages.")
    return options
def to_dict(self):
    """
    Builds the JSON-ready dictionary representation of this actor handler.

    :return: the object dictionary
    :rtype: dict
    """
    result = super(ActorHandler, self).to_dict()
    result["type"] = "ActorHandler"
    # sub-actors are serialized individually rather than through the config
    del result["config"]["actors"]
    result["actors"] = [actor.to_dict() for actor in self.actors]
    return result
@classmethod
def from_dict(cls, d):
"""
Restores an object state from a dictionary, used in de-JSONification.
:param d: the object dictionary
:type d: dict
:return: the object
:rtype: object
"""
result = super(ActorHandler, cls).from_dict(d)
if "actors" in d:
l = d["actors"]
for e in l:
if u"type" in e:
typestr = e[u"type"]
else:
typestr = e["type"]
result.actors.append(classes.get_dict_handler(typestr)(e))
return result
@property
def actors(self):
"""
Obtains the currently set sub-actors.
:return: the sub-actors
:rtype: list
"""
result = self.config["actors"]
if result is None:
result = []
return result
@actors.setter
def actors(self, actors):
"""
Sets the sub-actors of the actor.
:param actors: the sub-actors
:type actors: list
"""
if actors is None:
actors = self.default_actors()
self.check_actors(actors)
self.config["actors"] = actors
@property
def active(self):
    """
    Counts the sub-actors that are not flagged as skipped.

    :return: the count
    :rtype: int
    """
    return sum(1 for actor in self.actors if not actor.skip)
@property
def first_active(self):
    """
    Gives the first sub-actor that is not skipped.

    :return: the first active actor, None if not available
    :rtype: Actor
    """
    for candidate in self.actors:
        if not candidate.skip:
            return candidate
    return None
@property
def last_active(self):
    """
    Gives the last sub-actor that is not skipped.

    :return: the last active actor, None if not available
    :rtype: Actor
    """
    for candidate in reversed(self.actors):
        if not candidate.skip:
            return candidate
    return None
def index_of(self, name):
    """
    Looks up the position of the sub-actor with the given name.

    :param name: the name of the Actor to find
    :type name: str
    :return: the index, -1 if not found
    :rtype: int
    """
    for position, candidate in enumerate(self.actors):
        if candidate.name == name:
            return position
    return -1
def update_parent(self):
    """
    Points every managed sub-actor back at this handler as its parent.
    """
    for child in self.actors:
        child.parent = self
def setup(self):
    """
    Configures the actor before execution: checks the sub-actors, ensures
    their names are unique, sets each of them up, then sets up the director.

    :return: None if successful, otherwise error message
    :rtype: str
    """
    result = super(ActorHandler, self).setup()
    if result is None:
        self.update_parent()
        try:
            self.check_actors(self.actors)
        except Exception as e:  # fixed: 'except Exception, e' is Python-2-only syntax
            result = str(e)
    if result is None:
        # make sure every sub-actor carries a name unique within this handler
        for actor in self.actors:
            name = actor.name
            newname = actor.unique_name(actor.name)
            if name != newname:
                actor.name = newname
    if result is None:
        # set up the active (non-skipped) sub-actors, stopping at the first error
        for actor in self.actors:
            if actor.skip:
                continue
            result = actor.setup()
            if result is not None:
                break
    if result is None:
        result = self._director.setup()
    return result
def do_execute(self):
"""
The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str
"""
return self._director.execute()
def stop_execution(self):
"""
Triggers the stopping of the actor.
"""
self._director.stop_execution()
def wrapup(self):
"""
Finishes up after execution finishes, does not remove any graphical output.
"""
for actor in self.actors:
if actor.skip:
continue
actor.wrapup()
super(ActorHandler, self).wrapup()
def cleanup(self):
"""
Destructive finishing up after execution stopped.
"""
for actor in self.actors:
if actor.skip:
continue
actor.cleanup()
super(ActorHandler, self).cleanup()
@property
def tree(self):
    """
    Renders this sub-flow as an indented tree, one actor per line.

    :return: the tree
    :rtype: str
    """
    rows = []
    self._build_tree(self, rows)
    return '\n'.join(rows)
|
fracpete/python-weka-wrapper | python/weka/flow/control.py | SequentialDirector.check_actors | python | def check_actors(self):
actors = []
for actor in self.owner.actors:
if actor.skip:
continue
actors.append(actor)
if len(actors) == 0:
return
if not self.allow_source and base.is_source(actors[0]):
raise Exception("Actor '" + actors[0].full_name + "' is a source, but no sources allowed!")
for i in xrange(1, len(actors)):
if not isinstance(actors[i], InputConsumer):
raise Exception("Actor does not accept any input: " + actors[i].full_name) | Checks the actors of the owner. Raises an exception if invalid. | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/flow/control.py#L508-L523 | null | class SequentialDirector(Director, Stoppable):
"""
Director for sequential execution of actors.
"""
def __init__(self, owner):
"""
Initializes the director.
:param owner: the owning actor
:type owner: Actor
"""
super(SequentialDirector, self).__init__(owner)
self._stopping = False
self._stopped = False
self._allow_source = False
self._record_output = True
self._recorded_output = []
@property
def allow_source(self):
"""
Obtains whether to allow a source actor.
:return: true if to allow
:rtype: bool
"""
return self._allow_source
@allow_source.setter
def allow_source(self, allow):
"""
Sets whether to allow a source actor.
:param allow: true if to allow
:type allow: bool
"""
self._allow_source = allow
@property
def record_output(self):
"""
Obtains whether to record the output of the last actor.
:return: true if to record
:rtype: bool
"""
return self._record_output
@record_output.setter
def record_output(self, record):
"""
Sets whether to record the output of the last actor.
:param record: true if to record
:type record: bool
"""
self._record_output = record
@property
def recorded_output(self):
"""
Obtains the recorded output.
:return: the output
:rtype: list
"""
return self._recorded_output
def stop_execution(self):
"""
Triggers the stopping of the object.
"""
if not (self._stopping or self._stopped):
for actor in self.owner.actors:
actor.stop_execution()
self._stopping = True
def is_stopping(self):
"""
Returns whether the director is in the process of stopping.
:return:
"""
return self._stopping
def is_stopped(self):
"""
Returns whether the object has been stopped.
:return: whether stopped
:rtype: bool
"""
return self._stopped
def check_owner(self, owner):
"""
Checks the owner. Raises an exception if invalid.
:param owner: the owner to check
:type owner: Actor
"""
if not isinstance(owner, ActorHandler):
raise Exception("Owner is not an ActorHandler: " + owner.__name__)
def setup(self):
    """
    Performs some checks on the sub-actors before execution.

    :return: None if successful, otherwise error message.
    :rtype: str
    """
    result = super(SequentialDirector, self).setup()
    if result is None:
        try:
            self.check_actors()
        except Exception as e:  # fixed: 'except Exception, e' is Python-2-only syntax
            result = str(e)
    return result
def do_execute(self):
    """
    Actual execution of the director: repeatedly sweeps over the owner's
    sub-actors, handing each produced token to the next actor in line,
    until every actor is finished or execution is stopped.

    :return: None if successful, otherwise error message
    :rtype: str
    """
    self._stopped = False
    self._stopping = False
    not_finished_actor = self.owner.first_active
    # actors that still hold buffered output tokens to be drained later
    pending_actors = []
    finished = False
    actor_result = None
    while not (self.is_stopping() or self.is_stopped()) and not finished:
        # determining starting point of next iteration: resume at the last
        # pending producer, otherwise at the first unfinished actor
        if len(pending_actors) > 0:
            start_index = self.owner.index_of(pending_actors[-1].name)
        else:
            start_index = self.owner.index_of(not_finished_actor.name)
            not_finished_actor = None
        # iterate over actors
        token = None
        last_active = -1
        if self.owner.active > 0:
            last_active = self.owner.last_active.index
        for i in xrange(start_index, last_active + 1):
            # do we have to stop the execution?
            if self.is_stopped() or self.is_stopping():
                break
            curr = self.owner.actors[i]
            if curr.skip:
                continue
            # no token? get pending one or produce new one
            if token is None:
                if isinstance(curr, OutputProducer) and curr.has_output():
                    # this actor is the buffered producer we resumed at
                    pending_actors.pop()
                else:
                    actor_result = curr.execute()
                    if actor_result is not None:
                        self.owner.logger.error(
                            curr.full_name + " generated following error output:\n" + actor_result)
                        break
                if isinstance(curr, OutputProducer) and curr.has_output():
                    token = curr.output()
                else:
                    token = None
                # still more to come? remember the producer for the next sweep
                if isinstance(curr, OutputProducer) and curr.has_output():
                    pending_actors.append(curr)
            else:
                # process token
                curr.input = token
                actor_result = curr.execute()
                if actor_result is not None:
                    self.owner.logger.error(
                        curr.full_name + " generated following error output:\n" + actor_result)
                    break
                # was a new token produced?
                if isinstance(curr, OutputProducer):
                    if curr.has_output():
                        token = curr.output()
                    else:
                        token = None
                    # still more to come? remember the producer for the next sweep
                    if curr.has_output():
                        pending_actors.append(curr)
                else:
                    token = None
            # token from last actor generated? -> store
            if (i == self.owner.last_active.index) and (token is not None):
                if self._record_output:
                    self._recorded_output.append(token)
            # no token produced, ignore rest of actors
            if isinstance(curr, OutputProducer) and (token is None):
                break
        # all actors finished?
        finished = (not_finished_actor is None) and (len(pending_actors) == 0)
    return actor_result
|
fracpete/python-weka-wrapper | python/weka/flow/control.py | Flow.save | python | def save(cls, flow, fname):
result = None
try:
f = open(fname, 'w')
f.write(flow.to_json())
f.close()
except Exception, e:
result = str(e)
return result | Saves the flow to a JSON file.
:param flow: the flow to save
:type flow: Flow
:param fname: the file to load
:type fname: str
:return: None if successful, otherwise error message
:rtype: str | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/flow/control.py#L709-L727 | null | class Flow(ActorHandler, StorageHandler):
"""
Root actor for defining and executing flows.
"""
def __init__(self, name=None, config=None):
"""
Initializes the sequence.
:param name: the name of the sequence
:type name: str
:param config: the dictionary with the options (str -> object).
:type config: dict
"""
super(Flow, self).__init__(name=name, config=config)
self._storage = {}
def description(self):
"""
Returns a description of the actor.
:return: the description
:rtype: str
"""
return "Root actor for defining and executing flows."
def new_director(self):
"""
Creates the director to use for handling the sub-actors.
:return: the director instance
:rtype: Director
"""
result = SequentialDirector(self)
result.record_output = False
result.allow_source = True
return result
def check_actors(self, actors):
"""
Performs checks on the actors that are to be used. Raises an exception if invalid setup.
:param actors: the actors to check
:type actors: list
"""
super(Flow, self).check_actors(actors)
actor = self.first_active
if (actor is not None) and not base.is_source(actor):
raise Exception("First active actor is not a source: " + actor.full_name)
@property
def storage(self):
"""
Returns the internal storage.
:return: the internal storage
:rtype: dict
"""
return self._storage
@classmethod
def load(cls, fname):
    """
    Restores a flow from the given JSON file.

    :param fname: the file to load
    :type fname: str
    :return: the flow
    :rtype: Flow
    """
    # reading the whole file at once is equivalent to joining its lines
    with open(fname) as f:
        return Flow.from_json(f.read())
@classmethod
|
fracpete/python-weka-wrapper | python/weka/flow/control.py | BranchDirector.setup | python | def setup(self):
result = super(BranchDirector, self).setup()
if result is None:
try:
self.check_actors()
except Exception, e:
result = str(e)
return result | Performs some checks.
:return: None if successful, otherwise error message.
:rtype: str | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/flow/control.py#L1076-L1089 | null | class BranchDirector(Director, Stoppable):
"""
Director for the Branch actor.
"""
def __init__(self, owner):
"""
Initializes the director.
:param owner: the owning actor
:type owner: Actor
"""
super(BranchDirector, self).__init__(owner)
self._stopping = False
self._stopped = False
def stop_execution(self):
"""
Triggers the stopping of the object.
"""
if not (self._stopping or self._stopped):
for actor in self.owner.actors:
actor.stop_execution()
self._stopping = True
def is_stopping(self):
"""
Returns whether the director is in the process of stopping.
:return:
"""
return self._stopping
def is_stopped(self):
"""
Returns whether the object has been stopped.
:return: whether stopped
:rtype: bool
"""
return self._stopped
def check_owner(self, owner):
"""
Checks the owner. Raises an exception if invalid.
:param owner: the owner to check
:type owner: Actor
"""
if not isinstance(owner, Branch):
raise Exception("Owner is not a Branch: " + owner.__name__)
def check_actors(self):
"""
Checks the actors of the owner. Raises an exception if invalid.
"""
actors = []
for actor in self.owner.actors:
if actor.skip:
continue
actors.append(actor)
if len(actors) == 0:
return
for actor in actors:
if not isinstance(actor, InputConsumer):
raise Exception("Actor does not accept any input: " + actor.full_name)
def do_execute(self):
"""
Actual execution of the director.
:return: None if successful, otherwise error message
:rtype: str
"""
result = None
self._stopped = False
self._stopping = False
for actor in self.owner.actors:
if self.is_stopping() or self.is_stopped():
break
actor.input = self.owner.input
result = actor.execute()
if result is not None:
break
return result
|
fracpete/python-weka-wrapper | python/weka/attribute_selection.py | AttributeSelection.ranked_attributes | python | def ranked_attributes(self):
matrix = javabridge.call(self.jobject, "rankedAttributes", "()[[D")
if matrix is None:
return None
else:
return arrays.double_matrix_to_ndarray(matrix) | Returns the matrix of ranked attributes from the last run.
:return: the Numpy matrix
:rtype: ndarray | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/attribute_selection.py#L259-L270 | null | class AttributeSelection(JavaObject):
"""
Performs attribute selection using search and evaluation algorithms.
"""
def __init__(self):
"""
Initializes the attribute selection.
"""
jobject = AttributeSelection.new_instance("weka.attributeSelection.AttributeSelection")
super(AttributeSelection, self).__init__(jobject)
def evaluator(self, evaluator):
"""
Sets the evaluator to use.
:param evaluator: the evaluator to use.
:type evaluator: ASEvaluation
"""
javabridge.call(self.jobject, "setEvaluator", "(Lweka/attributeSelection/ASEvaluation;)V", evaluator.jobject)
def search(self, search):
"""
Sets the search algorithm to use.
:param search: the search algorithm
:type search: ASSearch
"""
javabridge.call(self.jobject, "setSearch", "(Lweka/attributeSelection/ASSearch;)V", search.jobject)
def folds(self, folds):
"""
Sets the number of folds to use for cross-validation.
:param folds: the number of folds
:type folds: int
"""
javabridge.call(self.jobject, "setFolds", "(I)V", folds)
def ranking(self, ranking):
"""
Sets whether to perform a ranking, if possible.
:param ranking: whether to perform a ranking
:type ranking: bool
"""
javabridge.call(self.jobject, "setRanking", "(Z)V", ranking)
def seed(self, seed):
"""
Sets the seed for cross-validation.
:param seed: the seed value
:type seed: int
"""
javabridge.call(self.jobject, "setSeed", "(I)V", seed)
def crossvalidation(self, crossvalidation):
"""
Sets whether to perform cross-validation.
:param crossvalidation: whether to perform cross-validation
:type crossvalidation: bool
"""
javabridge.call(self.jobject, "setXval", "(Z)V", crossvalidation)
def select_attributes(self, instances):
"""
Performs attribute selection on the given dataset.
:param instances: the data to process
:type instances: Instances
"""
javabridge.call(self.jobject, "SelectAttributes", "(Lweka/core/Instances;)V", instances.jobject)
def select_attributes_cv_split(self, instances):
"""
Performs attribute selection on the given cross-validation split.
:param instances: the data to process
:type instances: Instances
"""
javabridge.call(self.jobject, "selectAttributesCVSplit", "(Lweka/core/Instances;)V", instances.jobject)
@property
def selected_attributes(self):
"""
Returns the selected attributes from the last run.
:return: the Numpy array of 0-based indices
:rtype: ndarray
"""
array = javabridge.call(self.jobject, "selectedAttributes", "()[I")
if array is None:
return None
else:
return javabridge.get_env().get_int_array_elements(array)
@property
def results_string(self):
"""
Generates a results string from the last attribute selection.
:return: the results string
:rtype: str
"""
return javabridge.call(self.jobject, "toResultsString", "()Ljava/lang/String;")
@property
def cv_results(self):
"""
Generates a results string from the last cross-validation attribute selection.
:return: the results string
:rtype: str
"""
return javabridge.call(self.jobject, "CVResultsString", "()Ljava/lang/String;")
@property
def number_attributes_selected(self):
"""
Returns the number of attributes that were selected.
:return: the number of attributes
:rtype: int
"""
return javabridge.call(self.jobject, "numberAttributesSelected", "()I")
def reduce_dimensionality(self, data):
    """
    Reduces the dimensionality of the provided Instance or Instances object.

    NOTE(review): this was declared as a @property even though it takes a
    'data' argument, so attribute access always raised a TypeError and the
    method could never be invoked; the decorator is removed to restore the
    documented call style reduce_dimensionality(data).

    :param data: the data to process
    :type data: Instances
    :return: the reduced dataset
    :rtype: Instances
    """
    if type(data) is Instance:
        return Instance(
            javabridge.call(
                self.jobject, "reduceDimensionality",
                "(Lweka/core/Instance;)Lweka/core/Instance;", data.jobject))
    else:
        return Instances(
            javabridge.call(
                self.jobject, "reduceDimensionality",
                "(Lweka/core/Instances;)Lweka/core/Instances;", data.jobject))
@classmethod
def attribute_selection(cls, evaluator, args):
"""
Performs attribute selection using the given attribute evaluator and options.
:param evaluator: the evaluator to use
:type evaluator: ASEvaluation
:param args: the command-line args for the attribute selection
:type args: list
:return: the results string
:rtype: str
"""
return javabridge.static_call(
"Lweka/attributeSelection/AttributeSelection;", "SelectAttributes",
"(Lweka/attributeSelection/ASEvaluation;[Ljava/lang/String;)Ljava/lang/String;",
evaluator.jobject, args)
|
fracpete/python-weka-wrapper | python/weka/core/classes.py | JavaArrayIterator.next | python | def next(self):
if self.index < self.length:
index = self.index
self.index += 1
return self.data[index]
else:
raise StopIteration() | Returns the next element from the array.
:return: the next array element object, wrapped as JavaObject if not null
:rtype: JavaObject or None | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/classes.py#L755-L767 | null | class JavaArrayIterator(object):
"""
Iterator for elements in a Java array.
"""
def __init__(self, data):
"""
:param data: the Java array to iterate over
:type data: JavaArray
"""
self.data = data
self.index = 0
self.length = len(data)
def __iter__(self):
"""
Returns itself.
"""
return self
|
fracpete/python-weka-wrapper | python/weka/core/classes.py | OptionHandler.options | python | def options(self):
if self.is_optionhandler:
return types.string_array_to_list(javabridge.call(self.jobject, "getOptions", "()[Ljava/lang/String;"))
else:
return [] | Obtains the currently set options as list.
:return: the list of options
:rtype: list | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/classes.py#L1081-L1091 | [
"def string_array_to_list(a):\n \"\"\"\n Turns the Java string array into Python unicode string list.\n\n :param a: the string array to convert\n :type a: JB_Object\n :return: the string list\n :rtype: list\n \"\"\"\n result = []\n length = javabridge.get_env().get_array_length(a)\n wr... | class OptionHandler(JavaObject, Configurable):
"""
Ancestor for option-handling classes.
Classes should implement the weka.core.OptionHandler interface to have any effect.
"""
def __init__(self, jobject, options=None):
"""
Initializes the wrapper with the specified Java object.
:param jobject: the java object to wrap
:type jobject: JB_Object
:param options: the options to set
:type options: list
"""
super(OptionHandler, self).__init__(jobject)
self.is_optionhandler = OptionHandler.check_type(jobject, "weka.core.OptionHandler")
if (options is not None) and (len(options) > 0):
self.options = options
# we have to manually instantiate some objects, since multiple inheritance doesn't call
# Configurable constructor
self._logger = None
self._help = {}
self._config = self.fix_config({})
if not has_dict_handler("OptionHandler"):
register_dict_handler("OptionHandler", OptionHandler.from_dict)
def global_info(self):
"""
Returns the globalInfo() result, None if not available.
:rtypes: str
"""
try:
return javabridge.call(self.jobject, "globalInfo", "()Ljava/lang/String;")
except JavaException:
return None
def description(self):
"""
Returns a description of the object.
:return: the description
:rtype: str
"""
return self.global_info()
@property
@options.setter
def options(self, options):
"""
Sets the command-line options (as list).
:param options: the list of command-line options to set
:type options: list
"""
if self.is_optionhandler:
javabridge.call(self.jobject, "setOptions", "([Ljava/lang/String;)V", types.string_list_to_array(options))
def to_commandline(self):
"""
Generates a commandline string from the JavaObject instance.
:return: the commandline string
:rtype: str
"""
return javabridge.static_call(
"Lweka/core/Utils;", "toCommandLine",
"(Ljava/lang/Object;)Ljava/lang/String;",
self.jobject)
def to_help(self):
"""
Returns a string that contains the 'global_info' text and the options.
:return: the generated help string
:rtype: str
"""
result = []
result.append(self.classname)
result.append("=" * len(self.classname))
result.append("")
result.append("DESCRIPTION")
result.append("")
result.append(self.global_info())
result.append("")
result.append("OPTIONS")
result.append("")
options = javabridge.call(self.jobject, "listOptions", "()Ljava/util/Enumeration;")
enum = javabridge.get_enumeration_wrapper(options)
while enum.hasMoreElements():
opt = Option(enum.nextElement())
result.append(opt.synopsis)
result.append(opt.description)
result.append("")
return '\n'.join(result)
def __str__(self):
"""
Calls the toString() method of the java object.
:return: the result of the toString() method
:rtype: str
"""
return javabridge.to_string(self.jobject)
def to_dict(self):
"""
Returns a dictionary that represents this object, to be used for JSONification.
:return: the object dictionary
:rtype: dict
"""
result = super(OptionHandler, self).to_dict()
result["type"] = "OptionHandler"
result["options"] = join_options(self.options)
return result
@classmethod
def from_dict(cls, d):
"""
Restores an object state from a dictionary, used in de-JSONification.
:param d: the object dictionary
:type d: dict
:return: the object
:rtype: object
"""
result = OptionHandler(cls.new_instance(d["class"]))
result.options = split_options(d["options"])
return result
|
fracpete/python-weka-wrapper | python/weka/core/classes.py | OptionHandler.options | python | def options(self, options):
# Silently a no-op for objects not implementing weka.core.OptionHandler.
if self.is_optionhandler:
    jarray = types.string_list_to_array(options)
    javabridge.call(self.jobject, "setOptions", "([Ljava/lang/String;)V", jarray)
:param options: the list of command-line options to set
:type options: list | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/classes.py#L1094-L1102 | [
"def string_array_to_list(a):\n \"\"\"\n Turns the Java string array into Python unicode string list.\n\n :param a: the string array to convert\n :type a: JB_Object\n :return: the string list\n :rtype: list\n \"\"\"\n result = []\n length = javabridge.get_env().get_array_length(a)\n wr... | class OptionHandler(JavaObject, Configurable):
"""
Ancestor for option-handling classes.
Classes should implement the weka.core.OptionHandler interface to have any effect.
"""
def __init__(self, jobject, options=None):
"""
Initializes the wrapper with the specified Java object.
:param jobject: the java object to wrap
:type jobject: JB_Object
:param options: the options to set
:type options: list
"""
super(OptionHandler, self).__init__(jobject)
self.is_optionhandler = OptionHandler.check_type(jobject, "weka.core.OptionHandler")
if (options is not None) and (len(options) > 0):
self.options = options
# we have to manually instantiate some objects, since multiple inheritance doesn't call
# Configurable constructor
self._logger = None
self._help = {}
self._config = self.fix_config({})
if not has_dict_handler("OptionHandler"):
register_dict_handler("OptionHandler", OptionHandler.from_dict)
def global_info(self):
"""
Returns the globalInfo() result, None if not available.
:rtypes: str
"""
try:
return javabridge.call(self.jobject, "globalInfo", "()Ljava/lang/String;")
except JavaException:
return None
def description(self):
"""
Returns a description of the object.
:return: the description
:rtype: str
"""
return self.global_info()
@property
def options(self):
"""
Obtains the currently set options as list.
:return: the list of options
:rtype: list
"""
if self.is_optionhandler:
return types.string_array_to_list(javabridge.call(self.jobject, "getOptions", "()[Ljava/lang/String;"))
else:
return []
@options.setter
def to_commandline(self):
"""
Generates a commandline string from the JavaObject instance.
:return: the commandline string
:rtype: str
"""
return javabridge.static_call(
"Lweka/core/Utils;", "toCommandLine",
"(Ljava/lang/Object;)Ljava/lang/String;",
self.jobject)
def to_help(self):
"""
Returns a string that contains the 'global_info' text and the options.
:return: the generated help string
:rtype: str
"""
result = []
result.append(self.classname)
result.append("=" * len(self.classname))
result.append("")
result.append("DESCRIPTION")
result.append("")
result.append(self.global_info())
result.append("")
result.append("OPTIONS")
result.append("")
options = javabridge.call(self.jobject, "listOptions", "()Ljava/util/Enumeration;")
enum = javabridge.get_enumeration_wrapper(options)
while enum.hasMoreElements():
opt = Option(enum.nextElement())
result.append(opt.synopsis)
result.append(opt.description)
result.append("")
return '\n'.join(result)
def __str__(self):
"""
Calls the toString() method of the java object.
:return: the result of the toString() method
:rtype: str
"""
return javabridge.to_string(self.jobject)
def to_dict(self):
"""
Returns a dictionary that represents this object, to be used for JSONification.
:return: the object dictionary
:rtype: dict
"""
result = super(OptionHandler, self).to_dict()
result["type"] = "OptionHandler"
result["options"] = join_options(self.options)
return result
@classmethod
def from_dict(cls, d):
"""
Restores an object state from a dictionary, used in de-JSONification.
:param d: the object dictionary
:type d: dict
:return: the object
:rtype: object
"""
result = OptionHandler(cls.new_instance(d["class"]))
result.options = split_options(d["options"])
return result
|
fracpete/python-weka-wrapper | python/weka/plot/clusterers.py | plot_cluster_assignments | python | def plot_cluster_assignments(evl, data, atts=None, inst_no=False, size=10, title=None, outfile=None, wait=True):
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
fig = plt.figure()
if data.class_index == -1:
c = None
else:
c = []
for i in xrange(data.num_instances):
inst = data.get_instance(i)
c.append(inst.get_value(inst.class_index))
if atts is None:
atts = []
for i in xrange(data.num_attributes):
atts.append(i)
num_plots = len(atts)
if inst_no:
num_plots += 1
clusters = evl.cluster_assignments
for index, att in enumerate(atts):
x = data.values(att)
ax = fig.add_subplot(
1, num_plots, index + 1)
if c is None:
ax.scatter(clusters, x, s=size, alpha=0.5)
else:
ax.scatter(clusters, x, c=c, s=size, alpha=0.5)
ax.set_xlabel("Clusters")
ax.set_title(data.attribute(att).name)
ax.get_xaxis().set_ticks(list(set(clusters)))
ax.grid(True)
if inst_no:
x = []
for i in xrange(data.num_instances):
x.append(i+1)
ax = fig.add_subplot(
1, num_plots, num_plots)
if c is None:
ax.scatter(clusters, x, s=size, alpha=0.5)
else:
ax.scatter(clusters, x, c=c, s=size, alpha=0.5)
ax.set_xlabel("Clusters")
ax.set_title("Instance number")
ax.get_xaxis().set_ticks(list(set(clusters)))
ax.grid(True)
if title is None:
title = data.relationname
fig.canvas.set_window_title(title)
plt.draw()
if not outfile is None:
plt.savefig(outfile)
if wait:
plt.show() | Plots the cluster assignments against the specified attributes.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param evl: the cluster evaluation to obtain the cluster assignments from
:type evl: ClusterEvaluation
:param data: the dataset the clusterer was evaluated against
:type data: Instances
:param atts: the list of attribute indices to plot, None for all
:type atts: list
:param inst_no: whether to include a fake attribute with the instance number
:type inst_no: bool
:param size: the size of the circles in point
:type size: int
:param title: an optional title
:type title: str
:param outfile: the (optional) file to save the generated plot to. The extension determines the file format.
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/plot/clusterers.py#L28-L111 | null | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# clusterers.py
# Copyright (C) 2014-2015 Fracpete (pythonwekawrapper at gmail dot com)
import logging
import weka.plot as plot
if plot.matplotlib_available:
import matplotlib.pyplot as plt
from weka.core.dataset import Instances
from weka.clusterers import ClusterEvaluation
# logging setup
logger = logging.getLogger(__name__)
|
fracpete/python-weka-wrapper | python/weka/core/serialization.py | write_all | python | def write_all(filename, jobjects):
array = javabridge.get_env().make_object_array(len(jobjects), javabridge.get_env().find_class("java/lang/Object"))
for i in xrange(len(jobjects)):
obj = jobjects[i]
if isinstance(obj, JavaObject):
obj = obj.jobject
javabridge.get_env().set_object_array_element(array, i, obj)
javabridge.static_call(
"Lweka/core/SerializationHelper;", "writeAll",
"(Ljava/lang/String;[Ljava/lang/Object;)V",
filename, array) | Serializes the list of objects to disk. JavaObject instances get automatically unwrapped.
:param filename: the file to serialize the object to
:type filename: str
:param jobjects: the list of objects to serialize
:type jobjects: list | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/serialization.py#L104-L122 | null | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# serialization.py
# Copyright (C) 2014-2015 Fracpete (pythonwekawrapper at gmail dot com)
import javabridge
import logging
import weka.core.classes as classes
from weka.core.classes import JavaObject
from javabridge.jutil import JavaException
# logging setup
logger = logging.getLogger(__name__)
def deepcopy(obj):
"""
Creates a deep copy of the JavaObject (or derived class) or JB_Object.
:param obj: the object to create a copy of
:type obj: object
:return: the copy, None if failed to copy
:rtype: object
"""
if isinstance(obj, JavaObject):
wrapped = True
jobject = obj.jobject
else:
wrapped = False
jobject = obj
try:
serialized = javabridge.make_instance("weka/core/SerializedObject", "(Ljava/lang/Object;)V", jobject)
jcopy = javabridge.call(serialized, "getObject", "()Ljava/lang/Object;")
if wrapped:
jcopy = obj.__class__(jobject=jcopy)
return jcopy
except JavaException, e:
print("Failed to create copy of " + classes.get_classname(obj) + ": " + str(e))
return None
def read(filename):
"""
Reads the serialized object from disk. Caller must wrap object in appropriate Python wrapper class.
:param filename: the file with the serialized object
:type filename: str
:return: the JB_Object
:rtype: JB_Object
"""
return javabridge.static_call(
"Lweka/core/SerializationHelper;", "read",
"(Ljava/lang/String;)Ljava/lang/Object;",
filename)
def read_all(filename):
"""
Reads the serialized objects from disk. Caller must wrap objects in appropriate Python wrapper classes.
:param filename: the file with the serialized objects
:type filename: str
:return: the list of JB_OBjects
:rtype: list
"""
array = javabridge.static_call(
"Lweka/core/SerializationHelper;", "readAll",
"(Ljava/lang/String;)[Ljava/lang/Object;",
filename)
if array is None:
return None
else:
return javabridge.get_env().get_object_array_elements(array)
def write(filename, jobject):
"""
Serializes the object to disk. JavaObject instances get automatically unwrapped.
:param filename: the file to serialize the object to
:type filename: str
:param jobject: the object to serialize
:type jobject: JB_Object or JavaObject
"""
if isinstance(jobject, JavaObject):
jobject = jobject.jobject
javabridge.static_call(
"Lweka/core/SerializationHelper;", "write",
"(Ljava/lang/String;Ljava/lang/Object;)V",
filename, jobject)
|
fracpete/python-weka-wrapper | python/weka/filters.py | main | python | def main():
parser = argparse.ArgumentParser(
description='Executes a filter from the command-line. Calls JVM start/stop automatically.')
parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
parser.add_argument("-i", metavar="input1", dest="input1", required=True, help="input file 1")
parser.add_argument("-o", metavar="output1", dest="output1", required=True, help="output file 1")
parser.add_argument("-r", metavar="input2", dest="input2", help="input file 2")
parser.add_argument("-s", metavar="output2", dest="output2", help="output file 2")
parser.add_argument("-c", metavar="classindex", default="-1", dest="classindex",
help="1-based class attribute index")
parser.add_argument("filter", help="filter classname, e.g., weka.filters.AllFilter")
parser.add_argument("option", nargs=argparse.REMAINDER, help="additional filter options")
parsed = parser.parse_args()
if parsed.input2 is None and parsed.output2 is not None:
raise Exception("No second input file provided ('-r ...')!")
jars = []
if parsed.classpath is not None:
jars = parsed.classpath.split(os.pathsep)
params = []
if parsed.input1 is not None:
params.extend(["-i", parsed.input1])
if parsed.output1 is not None:
params.extend(["-o", parsed.output1])
if parsed.input2 is not None:
params.extend(["-r", parsed.input2])
if parsed.output2 is not None:
params.extend(["-s", parsed.output2])
if parsed.classindex is not None:
params.extend(["-c", parsed.classindex])
jvm.start(jars, max_heap_size=parsed.heap, packages=True)
logger.debug("Commandline: " + join_options(sys.argv[1:]))
try:
flter = Filter(parsed.filter)
if len(parsed.option) > 0:
flter.options = parsed.option
loader = Loader(classname="weka.core.converters.ArffLoader")
in1 = loader.load_file(parsed.input1)
cls = parsed.classindex
if str(parsed.classindex) == "first":
cls = "0"
if str(parsed.classindex) == "last":
cls = str(in1.num_attributes - 1)
in1.class_index = int(cls)
flter.inputformat(in1)
out1 = flter.filter(in1)
saver = Saver(classname="weka.core.converters.ArffSaver")
saver.save_file(out1, parsed.output1)
if parsed.input2 is not None:
in2 = loader.load_file(parsed.input2)
in2.class_index = int(cls)
out2 = flter.filter(in2)
saver.save_file(out2, parsed.output2)
except Exception, e:
print(e)
finally:
jvm.stop() | Runs a filter from the command-line. Calls JVM start/stop automatically.
Use -h to see all options. | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/filters.py#L316-L380 | null | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# filters.py
# Copyright (C) 2014-2019 Fracpete (pythonwekawrapper at gmail dot com)
import javabridge
import logging
import os
import sys
import argparse
import weka.core.jvm as jvm
from weka.core.classes import OptionHandler, join_options
from weka.core.capabilities import Capabilities
from weka.core.converters import Loader
from weka.core.converters import Saver
from weka.core.dataset import Instances
from weka.core.dataset import Instance
from weka.core.stemmers import Stemmer
from weka.core.stopwords import Stopwords
from weka.core.tokenizers import Tokenizer
# logging setup
logger = logging.getLogger("weka.filters")
class Filter(OptionHandler):
"""
Wrapper class for filters.
"""
def __init__(self, classname="weka.filters.AllFilter", jobject=None, options=None):
"""
Initializes the specified filter using either the classname or the supplied JB_Object.
:param classname: the classname of the filter
:type classname: str
:param jobject: the JB_Object to use
:type jobject: JB_Object
:param options: the list of commandline options to set
:type options: list
"""
if jobject is None:
jobject = Filter.new_instance(classname)
self.enforce_type(jobject, "weka.filters.Filter")
super(Filter, self).__init__(jobject=jobject, options=options)
self.__input = javabridge.make_call(self.jobject, "input", "(Lweka/core/Instance;)Z")
self.__batchfinished = javabridge.make_call(self.jobject, "batchFinished", "()Z")
self.__output = javabridge.make_call(self.jobject, "output", "()Lweka/core/Instance;")
self.__outputformat = javabridge.make_call(self.jobject, "getOutputFormat", "()Lweka/core/Instances;")
def capabilities(self):
"""
Returns the capabilities of the filter.
:return: the capabilities
:rtype: Capabilities
"""
return Capabilities(javabridge.call(self.jobject, "getCapabilities", "()Lweka/core/Capabilities;"))
def inputformat(self, data):
"""
Sets the input format.
:param data: the data to use as input
:type data: Instances
"""
return javabridge.call(self.jobject, "setInputFormat", "(Lweka/core/Instances;)Z", data.jobject)
def input(self, inst):
"""
Inputs the Instance.
:param inst: the instance to filter
:type inst: Instance
:return: True if filtered can be collected from output
:rtype: bool
"""
return self.__input(inst.jobject)
def batch_finished(self):
"""
Signals the filter that the batch of data has finished.
:return: True if instances can be collected from the output
:rtype: bool
"""
return self.__batchfinished()
def outputformat(self):
"""
Returns the output format.
:return: the output format
:rtype: Instances
"""
inst = self.__outputformat()
if inst is None:
return None
else:
return Instances(inst)
def output(self):
"""
Outputs the filtered Instance.
:return: the filtered instance
:rtype: an Instance object
"""
return Instance(jobject=self.__output())
def filter(self, data):
"""
Filters the dataset(s). When providing a list, this can be used to create compatible train/test sets,
since the filter only gets initialized with the first dataset and all subsequent datasets get transformed
using the same setup.
NB: inputformat(Instances) must have been called beforehand.
:param data: the Instances to filter
:type data: Instances or list of Instances
:return: the filtered Instances object(s)
:rtype: Instances or list of Instances
"""
if isinstance(data, list):
result = []
for d in data:
result.append(Instances(javabridge.static_call(
"Lweka/filters/Filter;", "useFilter",
"(Lweka/core/Instances;Lweka/filters/Filter;)Lweka/core/Instances;",
d.jobject, self.jobject)))
return result
else:
return Instances(javabridge.static_call(
"Lweka/filters/Filter;", "useFilter",
"(Lweka/core/Instances;Lweka/filters/Filter;)Lweka/core/Instances;",
data.jobject, self.jobject))
def to_source(self, classname, data):
"""
Returns the model as Java source code if the classifier implements weka.filters.Sourcable.
:param classname: the classname for the generated Java code
:type classname: str
:param data: the dataset used for initializing the filter
:type data: Instances
:return: the model as source code string
:rtype: str
"""
if not self.check_type(self.jobject, "weka.filters.Sourcable"):
return None
return javabridge.call(self.jobject, "toSource", "(Ljava/lang/String;Lweka/core/Instances;)Ljava/lang/String;", classname, data.jobject)
@classmethod
def make_copy(cls, flter):
"""
Creates a copy of the filter.
:param flter: the filter to copy
:type flter: Filter
:return: the copy of the filter
:rtype: Filter
"""
return Filter(
jobject=javabridge.static_call(
"weka/filters/Filter", "makeCopy",
"(Lweka/filters/Filter;)Lweka/filters/Filter;", flter.jobject))
class MultiFilter(Filter):
"""
Wrapper class for weka.filters.MultiFilter.
"""
def __init__(self, jobject=None, options=None):
"""
Initializes the MultiFilter instance using either creating new instance or using the supplied JB_Object.
:param jobject: the JB_Object to use
:type jobject: JB_Object
:param options: list of commandline options
:type options: list
"""
if jobject is None:
classname = "weka.filters.MultiFilter"
jobject = MultiFilter.new_instance(classname)
self.enforce_type(jobject, "weka.filters.MultiFilter")
super(MultiFilter, self).__init__(jobject=jobject, options=options)
@property
def filters(self):
"""
Returns the list of base filters.
:return: the filter list
:rtype: list
"""
objects = javabridge.get_env().get_object_array_elements(
javabridge.call(self.jobject, "getFilters", "()[Lweka/filters/Filter;"))
result = []
for obj in objects:
result.append(Filter(jobject=obj))
return result
@filters.setter
def filters(self, filters):
"""
Sets the base filters.
:param filters: the list of base filters to use
:type filters: list
"""
obj = []
for fltr in filters:
obj.append(fltr.jobject)
javabridge.call(self.jobject, "setFilters", "([Lweka/filters/Filter;)V", obj)
class StringToWordVector(Filter):
"""
Wrapper class for weka.filters.unsupervised.attribute.StringToWordVector.
"""
def __init__(self, jobject=None, options=None):
"""
Initializes the StringToWordVector instance using either creating new instance or using the supplied JB_Object.
:param jobject: the JB_Object to use
:type jobject: JB_Object
:param options: list of commandline options
:type options: list
"""
if jobject is None:
classname = "weka.filters.unsupervised.attribute.StringToWordVector"
jobject = StringToWordVector.new_instance(classname)
self.enforce_type(jobject, "weka.filters.unsupervised.attribute.StringToWordVector")
super(StringToWordVector, self).__init__(jobject=jobject, options=options)
@property
def stemmer(self):
"""
Returns the stemmer.
:return: the stemmer
:rtype: Stemmer
"""
return Stemmer(
jobject=javabridge.call(self.jobject, "getStemmer", "()Lweka/core/stemmers/Stemmer;"))
@stemmer.setter
def stemmer(self, stemmer):
"""
Sets the stemmer.
:param stemmer: the stemmer to use
:type stemmer: Stemmer
"""
javabridge.call(
self.jobject, "setStemmer", "(Lweka/core/stemmers/Stemmer;)V", stemmer.jobject)
@property
def stopwords(self):
"""
Returns the stopwords handler.
:return: the stopwords handler
:rtype: Stopwords
"""
return Stopwords(
jobject=javabridge.call(self.jobject, "getStopwordsHandler", "()Lweka/core/stopwords/StopwordsHandler;"))
@stopwords.setter
def stopwords(self, stopwords):
"""
Sets the stopwords handler.
:param stopwords: the stopwords handler to use
:type stopwords: Stopwords
"""
javabridge.call(
self.jobject, "setStopwordsHandler", "(Lweka/core/stopwords/StopwordsHandler;)V", stopwords.jobject)
@property
def tokenizer(self):
"""
Returns the tokenizer.
:return: the tokenizer
:rtype: Tokenizer
"""
return Tokenizer(
jobject=javabridge.call(self.jobject, "getTokenizer", "()Lweka/core/tokenizers/Tokenizer;"))
@tokenizer.setter
def tokenizer(self, tokenizer):
"""
Sets the tokenizer.
:param tokenizer: the tokenizer to use
:type tokenizer: Tokenizer
"""
javabridge.call(
self.jobject, "setTokenizer", "(Lweka/core/tokenizers/Tokenizer;)V", tokenizer.jobject)
if __name__ == "__main__":
try:
main()
except Exception, ex:
print(ex)
|
fracpete/python-weka-wrapper | python/weka/core/converters.py | IncrementalLoaderIterator.next | python | def next(self):
result = javabridge.call(
self.loader.jobject, "getNextInstance",
"(Lweka/core/Instances;)Lweka/core/Instance;", self.structure.jobject)
if result is None:
raise StopIteration()
else:
return Instance(result) | Reads the next dataset row.
:return: the next row
:rtype: Instance | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/converters.py#L133-L146 | null | class IncrementalLoaderIterator(object):
"""
Iterator for dataset rows when loarding incrementally.
"""
def __init__(self, loader, structure):
"""
:param loader: the loader instance to use for loading the data incrementally
:type loader: Loader
:param structure: the dataset structure
:type structure: Instances
"""
self.loader = loader
self.structure = structure
def __iter__(self):
"""
Returns itself.
"""
return self
|
fracpete/python-weka-wrapper | python/weka/plot/classifiers.py | plot_classifier_errors | python | def plot_classifier_errors(predictions, absolute=True, max_relative_size=50, absolute_size=50, title=None,
outfile=None, wait=True):
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
actual = []
predicted = []
error = None
cls = None
for pred in predictions:
actual.append(pred.actual)
predicted.append(pred.predicted)
if isinstance(pred, NumericPrediction):
if error is None:
error = []
error.append(abs(pred.error))
elif isinstance(pred, NominalPrediction):
if cls is None:
cls = []
if pred.actual != pred.predicted:
cls.append(1)
else:
cls.append(0)
fig, ax = plt.subplots()
if error is None and cls is None:
ax.scatter(actual, predicted, s=absolute_size, alpha=0.5)
elif cls is not None:
ax.scatter(actual, predicted, c=cls, s=absolute_size, alpha=0.5)
elif error is not None:
if not absolute:
min_err = min(error)
max_err = max(error)
factor = (max_err - min_err) / max_relative_size
for i in xrange(len(error)):
error[i] = error[i] / factor * max_relative_size
ax.scatter(actual, predicted, s=error, alpha=0.5)
ax.set_xlabel("actual")
ax.set_ylabel("predicted")
if title is None:
title = "Classifier errors"
ax.set_title(title)
ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c="0.3")
ax.grid(True)
fig.canvas.set_window_title(title)
plt.draw()
if outfile is not None:
plt.savefig(outfile)
if wait:
plt.show() | Plots the classifers for the given list of predictions.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param predictions: the predictions to plot
:type predictions: list
:param absolute: whether to use absolute errors as size or relative ones
:type absolute: bool
:param max_relative_size: the maximum size in point in case of relative mode
:type max_relative_size: int
:param absolute_size: the size in point in case of absolute mode
:type absolute_size: int
:param title: an optional title
:type title: str
:param outfile: the output file, ignored if None
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/plot/classifiers.py#L30-L98 | null | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# classifiers.py
# Copyright (C) 2014-2015 Fracpete (pythonwekawrapper at gmail dot com)
import javabridge
import logging
import weka.plot as plot
if plot.matplotlib_available:
import matplotlib.pyplot as plt
from weka.core.classes import JavaObject, join_options
from weka.core.dataset import Instances
from weka.classifiers import Classifier, Evaluation, NumericPrediction, NominalPrediction
# logging setup
logger = logging.getLogger(__name__)
def generate_thresholdcurve_data(evaluation, class_index):
"""
Generates the threshold curve data from the evaluation object's predictions.
:param evaluation: the evaluation to obtain the predictions from
:type evaluation: Evaluation
:param class_index: the 0-based index of the class-label to create the plot for
:type class_index: int
:return: the generated threshold curve data
:rtype: Instances
"""
jtc = JavaObject.new_instance("weka.classifiers.evaluation.ThresholdCurve")
pred = javabridge.call(evaluation.jobject, "predictions", "()Ljava/util/ArrayList;")
result = Instances(
javabridge.call(jtc, "getCurve", "(Ljava/util/ArrayList;I)Lweka/core/Instances;", pred, class_index))
return result
def get_thresholdcurve_data(data, xname, yname):
"""
Retrieves x and y columns from of the data generated by the weka.classifiers.evaluation.ThresholdCurve
class.
:param data: the threshold curve data
:type data: Instances
:param xname: the name of the X column
:type xname: str
:param yname: the name of the Y column
:type yname: str
:return: tuple of x and y arrays
:rtype: tuple
"""
xi = data.attribute_by_name(xname).index
yi = data.attribute_by_name(yname).index
x = []
y = []
for i in xrange(data.num_instances):
inst = data.get_instance(i)
x.append(inst.get_value(xi))
y.append(inst.get_value(yi))
return x, y
def get_auc(data):
"""
Calculates the area under the ROC curve (AUC).
:param data: the threshold curve data
:type data: Instances
:return: the area
:rtype: float
"""
return javabridge.static_call(
"weka/classifiers/evaluation/ThresholdCurve", "getROCArea", "(Lweka/core/Instances;)D", data.jobject)
def get_prc(data):
"""
Calculates the area under the precision recall curve (PRC).
:param data: the threshold curve data
:type data: Instances
:return: the area
:rtype: float
"""
return javabridge.static_call(
"weka/classifiers/evaluation/ThresholdCurve", "getPRCArea", "(Lweka/core/Instances;)D", data.jobject)
def plot_roc(evaluation, class_index=None, title=None, key_loc="lower right", outfile=None, wait=True):
"""
Plots the ROC (receiver operator characteristics) curve for the given predictions.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param evaluation: the evaluation to obtain the predictions from
:type evaluation: Evaluation
:param class_index: the list of 0-based indices of the class-labels to create the plot for
:type class_index: list
:param title: an optional title
:type title: str
:param key_loc: the position string for the key
:type key_loc: str
:param outfile: the output file, ignored if None
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool
"""
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
if class_index is None:
class_index = [0]
ax = None
for cindex in class_index:
data = generate_thresholdcurve_data(evaluation, cindex)
head = evaluation.header
area = get_auc(data)
x, y = get_thresholdcurve_data(data, "False Positive Rate", "True Positive Rate")
if ax is None:
fig, ax = plt.subplots()
ax.set_xlabel("False Positive Rate")
ax.set_ylabel("True Positive Rate")
if title is None:
title = "ROC"
ax.set_title(title)
ax.grid(True)
fig.canvas.set_window_title(title)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plot_label = head.class_attribute.value(cindex) + " (AUC: %0.4f)" % area
ax.plot(x, y, label=plot_label)
ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c="0.3")
plt.draw()
plt.legend(loc=key_loc, shadow=True)
if outfile is not None:
plt.savefig(outfile)
if wait:
plt.show()
def plot_prc(evaluation, class_index=None, title=None, key_loc="lower center", outfile=None, wait=True):
"""
Plots the PRC (precision recall) curve for the given predictions.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param evaluation: the evaluation to obtain the predictions from
:type evaluation: Evaluation
:param class_index: the list of 0-based indices of the class-labels to create the plot for
:type class_index: list
:param title: an optional title
:type title: str
:param key_loc: the location string for the key
:type key_loc: str
:param outfile: the output file, ignored if None
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool
"""
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
if class_index is None:
class_index = [0]
ax = None
for cindex in class_index:
data = generate_thresholdcurve_data(evaluation, cindex)
head = evaluation.header
area = get_prc(data)
x, y = get_thresholdcurve_data(data, "Recall", "Precision")
if ax is None:
fig, ax = plt.subplots()
ax.set_xlabel("Recall")
ax.set_ylabel("Precision")
if title is None:
title = "PRC"
ax.set_title(title)
fig.canvas.set_window_title(title)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
ax.grid(True)
plot_label = head.class_attribute.value(cindex) + " (PRC: %0.4f)" % area
ax.plot(x, y, label=plot_label)
ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c="0.3")
plt.draw()
plt.legend(loc=key_loc, shadow=True)
if outfile is not None:
plt.savefig(outfile)
if wait:
plt.show()
def plot_learning_curve(classifiers, train, test=None, increments=100, metric="percent_correct",
title="Learning curve", label_template="[#] @ $", key_loc="lower right",
outfile=None, wait=True):
"""
Plots a learning curve.
:param classifiers: list of Classifier template objects
:type classifiers: list of Classifier
:param train: dataset to use for the building the classifier, used for evaluating it test set None
:type train: Instances
:param test: optional dataset (or list of datasets) to use for the testing the built classifiers
:type test: list or Instances
:param increments: the increments (>= 1: # of instances, <1: percentage of dataset)
:type increments: float
:param metric: the name of the numeric metric to plot (Evaluation.<metric>)
:type metric: str
:param title: the title for the plot
:type title: str
:param label_template: the template for the label in the plot
(#: 1-based index of classifier, @: full classname, !: simple classname,
$: options, *: 1-based index of test set)
:type label_template: str
:param key_loc: the location string for the key
:type key_loc: str
:param outfile: the output file, ignored if None
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool
"""
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
if not train.has_class():
logger.error("Training set has no class attribute set!")
return
if increments >= 1:
inc = increments
else:
inc = round(train.num_instances * increments)
if test is None:
tst = [train]
elif isinstance(test, list):
tst = test
elif isinstance(test, Instances):
tst = [test]
else:
logger.error("Expected list or Instances object, instead: " + type(test))
return
for t in tst:
if train.equal_headers(t) is not None:
logger.error("Training and test set are not compatible: " + train.equal_headers(t))
return
steps = []
cls = []
evls = {}
for classifier in classifiers:
cl = Classifier.make_copy(classifier)
cls.append(cl)
evls[cl] = {}
for t in tst:
evls[cl][t] = []
for i in xrange(train.num_instances):
if (i > 0) and (i % inc == 0):
steps.append(i+1)
for cl in cls:
# train
if cl.is_updateable:
if i == 0:
tr = Instances.copy_instances(train, 0, 1)
cl.build_classifier(tr)
else:
cl.update_classifier(train.get_instance(i))
else:
if (i > 0) and (i % inc == 0):
tr = Instances.copy_instances(train, 0, i + 1)
cl.build_classifier(tr)
# evaluate
if (i > 0) and (i % inc == 0):
for t in tst:
evl = Evaluation(t)
evl.test_model(cl, t)
evls[cl][t].append(getattr(evl, metric))
fig, ax = plt.subplots()
ax.set_xlabel("# of instances")
ax.set_ylabel(metric)
ax.set_title(title)
fig.canvas.set_window_title(title)
ax.grid(True)
i = 0
for cl in cls:
evlpertest = evls[cl]
i += 1
n = 0
for t in tst:
evl = evlpertest[t]
n += 1
plot_label = label_template.\
replace("#", str(i)).\
replace("*", str(n)).\
replace("@", cl.classname).\
replace("!", cl.classname[cl.classname.rfind(".") + 1:]).\
replace("$", join_options(cl.config))
ax.plot(steps, evl, label=plot_label)
plt.draw()
plt.legend(loc=key_loc, shadow=True)
if outfile is not None:
plt.savefig(outfile)
if wait:
plt.show()
|
fracpete/python-weka-wrapper | python/weka/classifiers.py | main | python | def main():
parser = argparse.ArgumentParser(
description='Performs classification/regression from the command-line. Calls JVM start/stop automatically.')
parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
parser.add_argument("-t", metavar="train", dest="train", required=True, help="Training set file")
parser.add_argument("-T", metavar="test", dest="test", help="Test set file")
parser.add_argument("-c", metavar="class index", dest="classindex", help="1-based class attribute index")
parser.add_argument("-d", metavar="outmodel", dest="outmodel", help="model output file name")
parser.add_argument("-l", metavar="inmodel", dest="inmodel", help="model input file name")
parser.add_argument("-x", metavar="num folds", dest="numfolds", help="number of folds for cross-validation")
parser.add_argument("-s", metavar="seed", dest="seed", help="seed value for randomization")
parser.add_argument("-v", action="store_true", dest="notrainstats", help="no statistics for training")
parser.add_argument("-o", action="store_true", dest="onlystats", help="only statistics, don't output model")
parser.add_argument("-i", action="store_true", dest="irstats", help="output information retrieval statistics")
parser.add_argument("-k", action="store_true", dest="itstats", help="output information theoretic statistics")
parser.add_argument("-m", metavar="costmatrix", dest="costmatrix", help="cost matrix file")
parser.add_argument("-g", metavar="graph", dest="graph", help="output file for graph (if supported)")
parser.add_argument("classifier", help="classifier classname, e.g., weka.classifiers.trees.J48")
parser.add_argument("option", nargs=argparse.REMAINDER, help="additional classifier options")
parsed = parser.parse_args()
jars = []
if parsed.classpath is not None:
jars = parsed.classpath.split(os.pathsep)
params = []
if parsed.train is not None:
params.extend(["-t", parsed.train])
if parsed.test is not None:
params.extend(["-T", parsed.test])
if parsed.classindex is not None:
params.extend(["-c", parsed.classindex])
if parsed.outmodel is not None:
params.extend(["-d", parsed.outmodel])
if parsed.inmodel is not None:
params.extend(["-l", parsed.inmodel])
if parsed.numfolds is not None:
params.extend(["-x", parsed.numfolds])
if parsed.seed is not None:
params.extend(["-s", parsed.seed])
if parsed.notrainstats:
params.append("-v")
if parsed.onlystats:
params.append("-o")
if parsed.irstats:
params.append("-i")
if parsed.itstats:
params.append("-k")
if parsed.costmatrix is not None:
params.extend(["-m", parsed.costmatrix])
if parsed.graph is not None:
params.extend(["-g", parsed.graph])
jvm.start(jars, max_heap_size=parsed.heap, packages=True)
logger.debug("Commandline: " + join_options(sys.argv[1:]))
try:
classifier = Classifier(classname=parsed.classifier)
if len(parsed.option) > 0:
classifier.options = parsed.option
print(Evaluation.evaluate_model(classifier, params))
except Exception, e:
print(e)
finally:
jvm.stop() | Runs a classifier from the command-line. Calls JVM start/stop automatically.
Use -h to see all options. | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/classifiers.py#L2117-L2185 | null | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# classifiers.py
# Copyright (C) 2014-2019 Fracpete (pythonwekawrapper at gmail dot com)
import sys
import os
import javabridge
import logging
import argparse
import weka.core.jvm as jvm
import weka.core.types as arrays
import weka.core.classes as classes
from numpy import *
from weka.core.classes import JavaObject, join_options, OptionHandler, Random, SelectedTag, Tags, Tag, JavaArray, \
is_instance_of
from weka.core.classes import AbstractParameter
from weka.core.capabilities import Capabilities
from weka.core.dataset import Instances, Instance, Attribute
from weka.filters import Filter
# logging setup
logger = logging.getLogger("weka.classifiers")
class Classifier(OptionHandler):
    """
    Wrapper class for Weka classifiers (weka.classifiers.Classifier).

    Exposes training, single-instance and batch prediction, plus optional
    capabilities of the underlying Java object (updateable, drawable,
    batch predictor), detected at construction time.
    """

    def __init__(self, classname="weka.classifiers.rules.ZeroR", jobject=None, options=None):
        """
        Initializes the specified classifier using either the classname or the supplied JB_Object.

        :param classname: the classname of the classifier
        :type classname: str
        :param jobject: the JB_Object to use
        :type jobject: JB_Object
        :param options: the list of commandline options to set
        :type options: list
        """
        if jobject is None:
            jobject = Classifier.new_instance(classname)
        self.enforce_type(jobject, "weka.classifiers.Classifier")
        # feature flags derived from the optional Java interfaces the object implements
        self.is_updateable = self.check_type(jobject, "weka.classifiers.UpdateableClassifier")
        self.is_drawable = self.check_type(jobject, "weka.core.Drawable")
        self.is_batchpredictor = self.check_type(jobject, "weka.core.BatchPredictor")
        super(Classifier, self).__init__(jobject=jobject, options=options)
        # pre-compiled javabridge call wrappers: cheaper than javabridge.call
        # on every prediction, which matters when classifying many instances
        self.__classify = javabridge.make_call(self.jobject, "classifyInstance", "(Lweka/core/Instance;)D")
        self.__distribution = javabridge.make_call(self.jobject, "distributionForInstance", "(Lweka/core/Instance;)[D")
        if self.is_batchpredictor:
            self.__distributions = javabridge.make_call(
                self.jobject, "distributionsForInstances", "(Lweka/core/Instances;)[[D")

    @property
    def capabilities(self):
        """
        Returns the capabilities of the classifier.

        :return: the capabilities
        :rtype: Capabilities
        """
        return Capabilities(javabridge.call(self.jobject, "getCapabilities", "()Lweka/core/Capabilities;"))

    def build_classifier(self, data):
        """
        Builds the classifier with the data.

        :param data: the data to train the classifier with
        :type data: Instances
        """
        javabridge.call(self.jobject, "buildClassifier", "(Lweka/core/Instances;)V", data.jobject)

    def update_classifier(self, inst):
        """
        Updates the classifier with the instance.

        Only works if the underlying Java classifier implements
        weka.classifiers.UpdateableClassifier; otherwise a critical
        message is logged and the call is a no-op.

        :param inst: the Instance to update the classifier with
        :type inst: Instance
        """
        if self.is_updateable:
            javabridge.call(self.jobject, "updateClassifier", "(Lweka/core/Instance;)V", inst.jobject)
        else:
            logger.critical(classes.get_classname(self.jobject) + " is not updateable!")

    def classify_instance(self, inst):
        """
        Peforms a prediction.

        :param inst: the Instance to get a prediction for
        :type inst: Instance
        :return: the classification (either regression value or 0-based label index)
        :rtype: float
        """
        return self.__classify(inst.jobject)

    def distribution_for_instance(self, inst):
        """
        Peforms a prediction, returning the class distribution.

        :param inst: the Instance to get the class distribution for
        :type inst: Instance
        :return: the class distribution array
        :rtype: ndarray
        """
        pred = self.__distribution(inst.jobject)
        return javabridge.get_env().get_double_array_elements(pred)

    def distributions_for_instances(self, data):
        """
        Peforms predictions, returning the class distributions.

        :param data: the Instances to get the class distributions for
        :type data: Instances
        :return: the class distribution matrix, None if not a batch predictor
        :rtype: ndarray
        """
        if self.is_batchpredictor:
            return arrays.double_matrix_to_ndarray(self.__distributions(data.jobject))
        else:
            return None

    @property
    def batch_size(self):
        """
        Returns the batch size, in case this classifier is a batch predictor.

        :return: the batch size, None if not a batch predictor
        :rtype: str
        """
        if self.is_batchpredictor:
            return javabridge.call(self.jobject, "getBatchSize", "()Ljava/lang/String;")
        else:
            return None

    @batch_size.setter
    def batch_size(self, size):
        """
        Sets the batch size, in case this classifier is a batch predictor.

        Silently ignored if the classifier is not a batch predictor.

        :param size: the size of the batch
        :type size: str
        """
        if self.is_batchpredictor:
            javabridge.call(self.jobject, "setBatchSize", "(Ljava/lang/String;)V", size)

    def has_efficient_batch_prediction(self):
        """
        Returns whether the classifier implements a more efficient batch prediction.

        :return: True if a more efficient batch prediction is implemented, always False if not batch predictor
        :rtype: bool
        """
        if self.is_batchpredictor:
            return javabridge.call(self.jobject, "implementsMoreEfficientBatchPrediction", "()Z")
        else:
            return False

    @property
    def graph_type(self):
        """
        Returns the graph type if classifier implements weka.core.Drawable, otherwise -1.

        :return: the type
        :rtype: int
        """
        if self.is_drawable:
            return javabridge.call(self.jobject, "graphType", "()I")
        else:
            return -1

    @property
    def graph(self):
        """
        Returns the graph if classifier implements weka.core.Drawable, otherwise None.

        :return: the generated graph string
        :rtype: str
        """
        if self.is_drawable:
            return javabridge.call(self.jobject, "graph", "()Ljava/lang/String;")
        else:
            return None

    def to_source(self, classname):
        """
        Returns the model as Java source code if the classifier implements weka.classifiers.Sourcable.

        :param classname: the classname for the generated Java code
        :type classname: str
        :return: the model as source code string, None if not Sourcable
        :rtype: str
        """
        if not self.check_type(self.jobject, "weka.classifiers.Sourcable"):
            return None
        return javabridge.call(self.jobject, "toSource", "(Ljava/lang/String;)Ljava/lang/String;", classname)

    @classmethod
    def make_copy(cls, classifier):
        """
        Creates a copy of the classifier.

        :param classifier: the classifier to copy
        :type classifier: Classifier
        :return: the copy of the classifier
        :rtype: Classifier
        """
        return Classifier(
            jobject=javabridge.static_call(
                "weka/classifiers/AbstractClassifier", "makeCopy",
                "(Lweka/classifiers/Classifier;)Lweka/classifiers/Classifier;", classifier.jobject))
class SingleClassifierEnhancer(Classifier):
    """
    Wrapper class for meta-classifiers that use a single base classifier
    (weka.classifiers.SingleClassifierEnhancer).
    """

    def __init__(self, classname=None, jobject=None, options=None):
        """
        Initializes the specified classifier using either the classname or the supplied JB_Object.

        :param classname: the classname of the classifier
        :type classname: str
        :param jobject: the JB_Object to use
        :type jobject: JB_Object
        :param options: the list of commandline options to set
        :type options: list
        """
        if jobject is None:
            jobject = Classifier.new_instance(classname)
        self.enforce_type(jobject, "weka.classifiers.SingleClassifierEnhancer")
        super(SingleClassifierEnhancer, self).__init__(classname=classname, jobject=jobject, options=options)

    @property
    def classifier(self):
        """
        Returns the base classifier.

        :return: the base classifier
        :rtype: Classifier
        """
        return Classifier(jobject=javabridge.call(self.jobject, "getClassifier", "()Lweka/classifiers/Classifier;"))

    @classifier.setter
    def classifier(self, classifier):
        """
        Sets the base classifier.

        :param classifier: the base classifier to use
        :type classifier: Classifier
        """
        javabridge.call(self.jobject, "setClassifier", "(Lweka/classifiers/Classifier;)V", classifier.jobject)
class FilteredClassifier(SingleClassifierEnhancer):
    """
    Wrapper class for the filtered classifier (weka.classifiers.meta.FilteredClassifier):
    applies a filter to the data before passing it on to the base classifier.
    """

    def __init__(self, jobject=None, options=None):
        """
        Initializes the specified classifier using its classname or the supplied JB_Object.

        :param jobject: the JB_Object to use
        :type jobject: JB_Object
        :param options: the list of commandline options to set
        :type options: list
        """
        classname = "weka.classifiers.meta.FilteredClassifier"
        if jobject is None:
            jobject = Classifier.new_instance(classname)
        else:
            self.enforce_type(jobject, classname)
        super(FilteredClassifier, self).__init__(jobject=jobject, options=options)

    @property
    def filter(self):
        """
        Returns the filter.

        :return: the filter in use
        :rtype: Filter
        """
        return Filter(jobject=javabridge.call(self.jobject, "getFilter", "()Lweka/filters/Filter;"))

    @filter.setter
    def filter(self, filtr):
        """
        Sets the filter.

        :param filtr: the filter to use
        :type filtr: Filter
        """
        javabridge.call(self.jobject, "setFilter", "(Lweka/filters/Filter;)V", filtr.jobject)

    def check_for_modified_class_attribute(self, check):
        """
        Sets whether to check for class attribute modifications.

        :param check: True if checking for modifications
        :type check: bool
        """
        # the Java property is the negation ("DoNotCheck..."), hence "not check"
        javabridge.call(self.jobject, "setDoNotCheckForModifiedClassAttribute", "(Z)V", not check)
class GridSearch(SingleClassifierEnhancer):
    """
    Wrapper class for the GridSearch meta-classifier
    (weka.classifiers.meta.GridSearch), which optimizes two classifier
    parameters (the X and Y axes of a grid) against an evaluation statistic.

    NB: requires the gridSearch Weka package to be installed and the JVM
    started with package support.
    """

    def __init__(self, jobject=None, options=None):
        """
        Initializes the specified classifier using its classname or the supplied JB_Object.

        :param jobject: the JB_Object to use
        :type jobject: JB_Object
        :param options: the list of commandline options to set
        :type options: list
        """
        classname = "weka.classifiers.meta.GridSearch"
        if jobject is None:
            jobject = GridSearch.new_instance(classname)
            # new_instance returns None if the class cannot be located on the classpath
            if jobject is None:
                raise Exception(
                    "Failed to instantiate GridSearch - package installed and jvm started with package support?")
        else:
            self.enforce_type(jobject, classname)
        super(GridSearch, self).__init__(jobject=jobject, options=options)
        self.tags_evaluation = Tags.get_tags("weka.classifiers.meta.GridSearch", "TAGS_EVALUATION")

    @property
    def evaluation(self):
        """
        Returns the currently set statistic used for evaluation.

        :return: the statistic
        :rtype: SelectedTag
        """
        return SelectedTag(
            javabridge.call(self.jobject, "getEvaluation", "()Lweka/core/SelectedTag;"))

    @evaluation.setter
    def evaluation(self, evl):
        """
        Sets the statistic to use for evaluation.

        :param evl: the statistic
        :type evl: SelectedTag, Tag or str
        """
        # accept str and Tag for convenience; normalize to SelectedTag
        if isinstance(evl, str):
            evl = self.tags_evaluation.find(evl)
        if isinstance(evl, Tag):
            evl = SelectedTag(tag_id=evl.ident, tags=self.tags_evaluation)
        javabridge.call(self.jobject, "setEvaluation", "(Lweka/core/SelectedTag;)V", evl.jobject)

    @property
    def x(self):
        """
        Returns a dictionary with all the current values for the X of the grid.
        Keys for the dictionary: property, min, max, step, base, expression
        Types: property=str, min=float, max=float, step=float, base=float, expression=str

        :return: the dictionary with the parameters
        :rtype: dict
        """
        result = {}
        result["property"] = javabridge.call(self.jobject, "getXProperty", "()Ljava/lang/String;")
        result["min"] = javabridge.call(self.jobject, "getXMin", "()D")
        result["max"] = javabridge.call(self.jobject, "getXMax", "()D")
        result["step"] = javabridge.call(self.jobject, "getXStep", "()D")
        result["base"] = javabridge.call(self.jobject, "getXBase", "()D")
        result["expression"] = javabridge.call(self.jobject, "getXExpression", "()Ljava/lang/String;")
        return result

    @x.setter
    def x(self, d):
        """
        Allows to configure the X of the grid with one method call.
        Keys for the dictionary: property, min, max, step, base, expression
        Types: property=str, min=float, max=float, step=float, base=float, expression=str

        Keys that are absent from the dictionary leave the corresponding
        Java-side property untouched.

        :param d: the dictionary with the parameters
        :type d: dict
        """
        if "property" in d:
            javabridge.call(self.jobject, "setXProperty", "(Ljava/lang/String;)V", d["property"])
        if "min" in d:
            javabridge.call(self.jobject, "setXMin", "(D)V", d["min"])
        if "max" in d:
            javabridge.call(self.jobject, "setXMax", "(D)V", d["max"])
        if "step" in d:
            javabridge.call(self.jobject, "setXStep", "(D)V", d["step"])
        if "base" in d:
            javabridge.call(self.jobject, "setXBase", "(D)V", d["base"])
        if "expression" in d:
            javabridge.call(self.jobject, "setXExpression", "(Ljava/lang/String;)V", d["expression"])

    @property
    def y(self):
        """
        Returns a dictionary with all the current values for the Y of the grid.
        Keys for the dictionary: property, min, max, step, base, expression
        Types: property=str, min=float, max=float, step=float, base=float, expression=str

        :return: the dictionary with the parameters
        :rtype: dict
        """
        result = {}
        result["property"] = javabridge.call(self.jobject, "getYProperty", "()Ljava/lang/String;")
        result["min"] = javabridge.call(self.jobject, "getYMin", "()D")
        result["max"] = javabridge.call(self.jobject, "getYMax", "()D")
        result["step"] = javabridge.call(self.jobject, "getYStep", "()D")
        result["base"] = javabridge.call(self.jobject, "getYBase", "()D")
        result["expression"] = javabridge.call(self.jobject, "getYExpression", "()Ljava/lang/String;")
        return result

    @y.setter
    def y(self, d):
        """
        Allows to configure the Y of the grid with one method call.
        Keys for the dictionary: property, min, max, step, base, expression
        Types: property=str, min=float, max=float, step=float, base=float, expression=str

        Keys that are absent from the dictionary leave the corresponding
        Java-side property untouched.

        :param d: the dictionary with the parameters
        :type d: dict
        """
        if "property" in d:
            javabridge.call(self.jobject, "setYProperty", "(Ljava/lang/String;)V", d["property"])
        if "min" in d:
            javabridge.call(self.jobject, "setYMin", "(D)V", d["min"])
        if "max" in d:
            javabridge.call(self.jobject, "setYMax", "(D)V", d["max"])
        if "step" in d:
            javabridge.call(self.jobject, "setYStep", "(D)V", d["step"])
        if "base" in d:
            javabridge.call(self.jobject, "setYBase", "(D)V", d["base"])
        if "expression" in d:
            javabridge.call(self.jobject, "setYExpression", "(Ljava/lang/String;)V", d["expression"])

    @property
    def best(self):
        """
        Returns the best classifier setup found during the search.

        :return: the best classifier setup
        :rtype: Classifier
        """
        return Classifier(jobject=javabridge.call(self.jobject, "getBestClassifier", "()Lweka/classifiers/Classifier;"))
class MultiSearch(SingleClassifierEnhancer):
    """
    Wrapper class for the MultiSearch meta-classifier.

    NB: 'multi-search-weka-package' must be installed (https://github.com/fracpete/multisearch-weka-package),
    version 2016.1.15 or later.
    """

    def __init__(self, jobject=None, options=None):
        """
        Initializes the specified classifier using its classname or the supplied JB_Object.

        :param jobject: the JB_Object to use
        :type jobject: JB_Object
        :param options: the list of commandline options to set
        :type options: list
        """
        classname = "weka.classifiers.meta.MultiSearch"
        if jobject is None:
            jobject = MultiSearch.new_instance(classname)
        else:
            self.enforce_type(jobject, classname)
        super(MultiSearch, self).__init__(jobject=jobject, options=options)
        self.tags_evaluation = Tags.get_object_tags(self, "getMetricsTags")

    @property
    def evaluation(self):
        """
        Returns the currently set statistic used for evaluation.

        :return: the statistic
        :rtype: SelectedTag
        """
        return SelectedTag(
            javabridge.call(self.jobject, "getEvaluation", "()Lweka/core/SelectedTag;"))

    @evaluation.setter
    def evaluation(self, evl):
        """
        Sets the statistic to use for evaluation.

        :param evl: the statistic
        :type evl: SelectedTag, Tag or str
        """
        # accept str and Tag for convenience; normalize to SelectedTag
        if isinstance(evl, str):
            evl = self.tags_evaluation.find(evl)
        if isinstance(evl, Tag):
            evl = SelectedTag(tag_id=evl.ident, tags=self.tags_evaluation)
        javabridge.call(self.jobject, "setEvaluation", "(Lweka/core/SelectedTag;)V", evl.jobject)

    @property
    def parameters(self):
        """
        Returns the list of currently set search parameters.

        :return: the list of AbstractSearchParameter objects
        :rtype: list
        """
        array = JavaArray(
            javabridge.call(self.jobject, "getSearchParameters", "()[Lweka/core/setupgenerator/AbstractParameter;"))
        result = []
        for item in array:
            result.append(AbstractParameter(jobject=item.jobject))
        return result

    @parameters.setter
    def parameters(self, params):
        """
        Sets the list of search parameters to use.

        :param params: list of AbstractSearchParameter objects
        :type params: list
        """
        array = JavaArray(JavaArray.new_instance("weka.core.setupgenerator.AbstractParameter", len(params)))
        for idx, obj in enumerate(params):
            array[idx] = obj.jobject
        javabridge.call(self.jobject, "setSearchParameters", "([Lweka/core/setupgenerator/AbstractParameter;)V", array.jobject)

    @property
    def best(self):
        """
        Returns the best classifier setup found during the search.

        :return: the best classifier setup
        :rtype: Classifier
        """
        return Classifier(jobject=javabridge.call(self.jobject, "getBestClassifier", "()Lweka/classifiers/Classifier;"))
class MultipleClassifiersCombiner(Classifier):
    """
    Wrapper class for meta-classifiers that use multiple base classifiers
    (weka.classifiers.MultipleClassifiersCombiner), e.g. Vote or Stacking.
    """

    def __init__(self, classname=None, jobject=None, options=None):
        """
        Initializes the specified classifier using either the classname or the supplied JB_Object.

        :param classname: the classname of the classifier
        :type classname: str
        :param jobject: the JB_Object to use
        :type jobject: JB_Object
        :param options: list of commandline options
        :type options: list
        """
        if jobject is None:
            jobject = Classifier.new_instance(classname)
        self.enforce_type(jobject, "weka.classifiers.MultipleClassifiersCombiner")
        super(MultipleClassifiersCombiner, self).__init__(classname=classname, jobject=jobject, options=options)

    @property
    def classifiers(self):
        """
        Returns the list of base classifiers.

        :return: the classifier list
        :rtype: list
        """
        objects = javabridge.get_env().get_object_array_elements(
            javabridge.call(self.jobject, "getClassifiers", "()[Lweka/classifiers/Classifier;"))
        result = []
        for obj in objects:
            result.append(Classifier(jobject=obj))
        return result

    @classifiers.setter
    def classifiers(self, classifiers):
        """
        Sets the base classifiers.

        :param classifiers: the list of base classifiers to use
        :type classifiers: list
        """
        obj = []
        for classifier in classifiers:
            obj.append(classifier.jobject)
        javabridge.call(self.jobject, "setClassifiers", "([Lweka/classifiers/Classifier;)V", obj)
class Kernel(OptionHandler):
    """
    Wrapper class for kernels (weka.classifiers.functions.supportVector.Kernel),
    as used by kernel-based classifiers such as SMO.
    """

    def __init__(self, classname=None, jobject=None, options=None):
        """
        Initializes the specified kernel using either the classname or the supplied JB_Object.

        :param classname: the classname of the kernel
        :type classname: str
        :param jobject: the JB_Object to use
        :type jobject: JB_Object
        :param options: the list of commandline options to set
        :type options: list
        """
        if jobject is None:
            jobject = Classifier.new_instance(classname)
        self.enforce_type(jobject, "weka.classifiers.functions.supportVector.Kernel")
        super(Kernel, self).__init__(jobject=jobject, options=options)

    # NOTE: unlike Classifier.capabilities this is a plain method, not a
    # property; kept as-is since callers invoke it with parentheses.
    def capabilities(self):
        """
        Returns the capabilities of the classifier.

        :return: the capabilities
        :rtype: Capabilities
        """
        return Capabilities(javabridge.call(self.jobject, "getCapabilities", "()Lweka/core/Capabilities;"))

    @property
    def checks_turned_off(self):
        """
        Returns whether checks are turned off.

        :return: True if checks turned off
        :rtype: bool
        """
        return javabridge.call(self.jobject, "getChecksTurnedOff", "()Z")

    @checks_turned_off.setter
    def checks_turned_off(self, off):
        """
        Turns any checks on/off.

        :param off: True to turn off checks
        :type off: bool
        """
        javabridge.call(self.jobject, "setChecksTurnedOff", "(Z)V", off)

    def clean(self):
        """
        Frees the memory used by the kernel.
        """
        javabridge.call(self.jobject, "clean", "()V")

    def build_kernel(self, data):
        """
        Builds the classifier with the data.

        :param data: the data to train the classifier with
        :type data: Instances
        """
        javabridge.call(self.jobject, "buildKernel", "(Lweka/core/Instances;)V", data.jobject)

    def eval(self, id1, id2, inst1):
        """
        Computes the result of the kernel function for two instances. If id1 == -1, eval use inst1 instead of an
        instance in the dataset.

        :param id1: the index of the first instance in the dataset
        :type id1: int
        :param id2: the index of the second instance in the dataset
        :type id2: int
        :param inst1: the instance corresponding to id1 (used if id1 == -1)
        :type inst1: Instance
        :return: the kernel function value
        :rtype: float
        """
        jinst1 = None
        if inst1 is not None:
            jinst1 = inst1.jobject
        return javabridge.call(self.jobject, "eval", "(IILweka/core/Instance;)D", id1, id2, jinst1)

    @classmethod
    def make_copy(cls, kernel):
        """
        Creates a copy of the kernel.

        :param kernel: the kernel to copy
        :type kernel: Kernel
        :return: the copy of the kernel
        :rtype: Kernel
        """
        return Kernel(
            jobject=javabridge.static_call(
                "weka/classifiers/functions/supportVector/Kernel", "makeCopy",
                "(Lweka/classifiers/functions/supportVector/Kernel;)Lweka/classifiers/functions/supportVector/Kernel;",
                kernel.jobject))
class KernelClassifier(Classifier):
    """
    Wrapper class for classifiers that have a kernel property, like SMO.

    The kernel property is discovered/accessed reflectively via the Java
    helper class weka.classifiers.KernelHelper.
    """

    def __init__(self, classname=None, jobject=None, options=None):
        """
        Initializes the specified classifier using either the classname or the supplied JB_Object.

        :param classname: the classname of the classifier
        :type classname: str
        :param jobject: the JB_Object to use
        :type jobject: JB_Object
        :param options: list of commandline options
        :type options: list
        :raises Exception: if the wrapped classifier does not expose a kernel property
        """
        if jobject is None:
            jobject = Classifier.new_instance(classname)
        self.enforce_type(jobject, "weka.classifiers.Classifier")
        if not javabridge.static_call(
                "weka/classifiers/KernelHelper", "hasKernelProperty",
                "(Ljava/lang/Object;)Z",
                jobject):
            raise Exception("Does not handle a kernel: " + classes.get_classname(jobject))
        super(KernelClassifier, self).__init__(classname=classname, jobject=jobject, options=options)

    @property
    def kernel(self):
        """
        Returns the current kernel.

        :return: the kernel or None if none found
        :rtype: Kernel
        """
        result = javabridge.static_call(
            "weka/classifiers/KernelHelper", "getKernel",
            "(Ljava/lang/Object;)Lweka/classifiers/functions/supportVector/Kernel;",
            self.jobject)
        if result is None:
            return None
        else:
            return Kernel(jobject=result)

    @kernel.setter
    def kernel(self, kernel):
        """
        Sets the kernel.

        :param kernel: the kernel to set
        :type kernel: Kernel
        :raises Exception: if the kernel could not be set via reflection
        """
        result = javabridge.static_call(
            "weka/classifiers/KernelHelper", "setKernel",
            "(Ljava/lang/Object;Lweka/classifiers/functions/supportVector/Kernel;)Z",
            self.jobject, kernel.jobject)
        if not result:
            raise Exception("Failed to set kernel!")
class Prediction(JavaObject):
    """
    Wrapper class for a single prediction (weka.classifiers.evaluation.Prediction).

    Base class for NominalPrediction and NumericPrediction.
    """

    def __init__(self, jobject):
        """
        Initializes the wrapper.

        :param jobject: the prediction to wrap
        :type jobject: JB_Object
        """
        self.enforce_type(jobject, "weka.classifiers.evaluation.Prediction")
        super(Prediction, self).__init__(jobject)

    @property
    def actual(self):
        """
        Returns the actual value.

        :return: the actual value (internal representation)
        :rtype: float
        """
        return javabridge.call(self.jobject, "actual", "()D")

    @property
    def predicted(self):
        """
        Returns the predicted value.

        :return: the predicted value (internal representation)
        :rtype: float
        """
        return javabridge.call(self.jobject, "predicted", "()D")

    @property
    def weight(self):
        """
        Returns the weight.

        :return: the weight of the Instance that was used
        :rtype: float
        """
        return javabridge.call(self.jobject, "weight", "()D")
class NominalPrediction(Prediction):
    """
    Wrapper class for a nominal prediction
    (weka.classifiers.evaluation.NominalPrediction).
    """

    def __init__(self, jobject):
        """
        Initializes the wrapper.

        :param jobject: the prediction to wrap
        :type jobject: JB_Object
        """
        self.enforce_type(jobject, "weka.classifiers.evaluation.NominalPrediction")
        super(NominalPrediction, self).__init__(jobject)

    @property
    def distribution(self):
        """
        Returns the class distribution.

        :return: the class distribution list
        :rtype: ndarray
        """
        return javabridge.get_env().get_double_array_elements(javabridge.call(self.jobject, "distribution", "()[D"))

    @property
    def margin(self):
        """
        Returns the margin.

        :return: the margin
        :rtype: float
        """
        return javabridge.call(self.jobject, "margin", "()D")
class NumericPrediction(Prediction):
    """
    Wrapper class for a numeric prediction
    (weka.classifiers.evaluation.NumericPrediction).
    """

    def __init__(self, jobject):
        """
        Initializes the wrapper.

        :param jobject: the prediction to wrap
        :type jobject: JB_Object
        """
        self.enforce_type(jobject, "weka.classifiers.evaluation.NumericPrediction")
        super(NumericPrediction, self).__init__(jobject)

    @property
    def error(self):
        """
        Returns the error.

        :return: the error
        :rtype: float
        """
        return javabridge.call(self.jobject, "error", "()D")

    @property
    def prediction_intervals(self):
        """
        Returns the prediction intervals.

        :return: the intervals
        :rtype: ndarray
        """
        return arrays.double_matrix_to_ndarray(javabridge.call(self.jobject, "predictionIntervals", "()[[D"))
class CostMatrix(JavaObject):
    """
    Class for storing and manipulating a misclassification cost matrix. The element at position i,j in the matrix
    is the penalty for classifying an instance of class j as class i. Cost values can be fixed or computed on a
    per-instance basis (cost sensitive evaluation only) from the value of an attribute or an expression involving
    attribute(s).
    """

    def __init__(self, matrx=None, num_classes=None):
        """
        Initializes the matrix object.

        Exactly one of matrx or num_classes must be supplied.

        :param matrx: the matrix to copy
        :type matrx: CostMatrix or ndarray or JB_Object
        :param num_classes: the number of classes
        :type num_classes: int
        :raises Exception: if neither/invalid arguments are provided
        """
        if matrx is not None:
            if isinstance(matrx, CostMatrix):
                jobject = javabridge.make_instance(
                    "weka/classifiers/CostMatrix", "(Lweka/classifiers/CostMatrix;)V", matrx.jobject)
                super(CostMatrix, self).__init__(jobject)
            elif isinstance(matrx, ndarray):
                shp = matrx.shape
                if len(shp) != 2:
                    raise Exception("Numpy array must be a 2-dimensional array!")
                rows, cols = shp
                if rows == cols:
                    cmatrix = CostMatrix(num_classes=rows)
                    # "range" instead of "xrange": works on both Python 2 and 3
                    for r in range(rows):
                        for c in range(cols):
                            cmatrix.set_element(r, c, matrx[r][c])
                    super(CostMatrix, self).__init__(cmatrix.jobject)
                else:
                    raise Exception("Numpy array must be a square matrix!")
            elif isinstance(matrx, javabridge.JB_Object):
                super(CostMatrix, self).__init__(matrx)
            else:
                raise Exception(
                    "Matrix must be either a CostMatrix or a 2-dimensional numpy array: " + str(type(matrx)))
        elif num_classes is not None:
            jobject = javabridge.make_instance(
                "weka/classifiers/CostMatrix", "(I)V", num_classes)
            super(CostMatrix, self).__init__(jobject)
        else:
            raise Exception("Either matrix or number of classes must be provided!")

    def apply_cost_matrix(self, data, rnd):
        """
        Applies the cost matrix to the data.

        :param data: the data to apply to
        :type data: Instances
        :param rnd: the random number generator
        :type rnd: Random
        :return: the reweighted/resampled data
        :rtype: Instances
        """
        return Instances(
            javabridge.call(
                self.jobject, "applyCostMatrix", "(Lweka/core/Instances;Ljava/util/Random;)Lweka/core/Instances;",
                data.jobject, rnd.jobject))

    def expected_costs(self, class_probs, inst=None):
        """
        Calculates the expected misclassification cost for each possible class value, given class probability
        estimates.

        :param class_probs: the class probabilities
        :type class_probs: ndarray
        :param inst: the Instance, required when the matrix contains per-instance cost expressions
        :type inst: Instance
        :return: the calculated costs
        :rtype: ndarray
        """
        if inst is None:
            costs = javabridge.call(
                self.jobject, "expectedCosts", "([D)[D", javabridge.get_env().make_double_array(class_probs))
            return javabridge.get_env().get_double_array_elements(costs)
        else:
            costs = javabridge.call(
                self.jobject, "expectedCosts", "([DLweka/core/Instance;)[D",
                javabridge.get_env().make_double_array(class_probs), inst.jobject)
            return javabridge.get_env().get_double_array_elements(costs)

    def get_cell(self, row, col):
        """
        Returns the JB_Object at the specified location.

        :param row: the 0-based index of the row
        :type row: int
        :param col: the 0-based index of the column
        :type col: int
        :return: the object in that cell
        :rtype: JB_Object
        """
        return javabridge.call(
            self.jobject, "getCell", "(II)Ljava/lang/Object;", row, col)

    def set_cell(self, row, col, obj):
        """
        Sets the JB_Object at the specified location. Automatically unwraps JavaObject.

        :param row: the 0-based index of the row
        :type row: int
        :param col: the 0-based index of the column
        :type col: int
        :param obj: the object for that cell
        :type obj: object
        """
        if isinstance(obj, JavaObject):
            obj = obj.jobject
        javabridge.call(
            self.jobject, "setCell", "(IILjava/lang/Object;)V", row, col, obj)

    def get_element(self, row, col, inst=None):
        """
        Returns the value at the specified location.

        :param row: the 0-based index of the row
        :type row: int
        :param col: the 0-based index of the column
        :type col: int
        :param inst: the Instance, required when the cell holds a per-instance cost expression
        :type inst: Instance
        :return: the value in that cell
        :rtype: float
        """
        if inst is None:
            return javabridge.call(
                self.jobject, "getElement", "(II)D", row, col)
        else:
            return javabridge.call(
                self.jobject, "getElement", "(IILweka/core/Instance;)D", row, col, inst.jobject)

    def set_element(self, row, col, value):
        """
        Sets the float value at the specified location.

        :param row: the 0-based index of the row
        :type row: int
        :param col: the 0-based index of the column
        :type col: int
        :param value: the float value for that cell
        :type value: float
        """
        javabridge.call(
            self.jobject, "setElement", "(IID)V", row, col, value)

    def get_max_cost(self, class_value, inst=None):
        """
        Gets the maximum cost for a particular class value.

        :param class_value: the class value to get the maximum cost for
        :type class_value: int
        :param inst: the Instance, required when the matrix contains per-instance cost expressions
        :type inst: Instance
        :return: the cost
        :rtype: float
        """
        if inst is None:
            return javabridge.call(
                self.jobject, "getMaxCost", "(I)D", class_value)
        else:
            # bug fix: previously invoked Java method "getElement" here,
            # which does not match the (ILweka/core/Instance;)D signature;
            # the correct overload is CostMatrix.getMaxCost(int, Instance)
            return javabridge.call(
                self.jobject, "getMaxCost", "(ILweka/core/Instance;)D", class_value, inst.jobject)

    def initialize(self):
        """
        Initializes the matrix.
        """
        javabridge.call(self.jobject, "initialize", "()V")

    def normalize(self):
        """
        Normalizes the matrix.
        """
        javabridge.call(self.jobject, "normalize", "()V")

    @property
    def num_columns(self):
        """
        Returns the number of columns.

        :return: the number of columns
        :rtype: int
        """
        return javabridge.call(self.jobject, "numColumns", "()I")

    @property
    def num_rows(self):
        """
        Returns the number of rows.

        :return: the number of rows
        :rtype: int
        """
        return javabridge.call(self.jobject, "numRows", "()I")

    @property
    def size(self):
        """
        Returns the number of rows/columns.

        :return: the number of rows/columns
        :rtype: int
        """
        return javabridge.call(self.jobject, "size", "()I")

    def to_matlab(self):
        """
        Returns the matrix in Matlab format.

        :return: the matrix as Matlab formatted string
        :rtype: str
        """
        return javabridge.call(self.jobject, "toMatlab", "()Ljava/lang/String;")

    @classmethod
    def parse_matlab(cls, matlab):
        """
        Parses the costmatrix definition in matlab format and returns a matrix.

        :param matlab: the matlab matrix string, eg [1 2; 3 4].
        :type matlab: str
        :return: the generated matrix
        :rtype: CostMatrix
        """
        return CostMatrix(
            matrx=javabridge.static_call(
                "weka/classifiers/CostMatrix", "parseMatlab",
                "(Ljava/lang/String;)Lweka/classifiers/CostMatrix;", matlab))
class Evaluation(JavaObject):
"""
Evaluation class for classifiers.
"""
    def __init__(self, data, cost_matrix=None):
        """
        Initializes an Evaluation object.

        :param data: the data to use to initialize the priors with
        :type data: Instances
        :param cost_matrix: the cost matrix to use for initializing
        :type cost_matrix: CostMatrix
        """
        if cost_matrix is None:
            jobject = javabridge.make_instance(
                "weka/classifiers/EvaluationWrapper", "(Lweka/core/Instances;)V",
                data.jobject)
        else:
            jobject = javabridge.make_instance(
                "weka/classifiers/EvaluationWrapper", "(Lweka/core/Instances;Lweka/classifiers/CostMatrix;)V",
                data.jobject, cost_matrix.jobject)
        # keep a reference to the Java-side EvaluationWrapper; the wrapped
        # weka.classifiers.Evaluation object is what this class exposes
        self.wrapper = jobject
        jobject = javabridge.call(jobject, "getEvaluation", "()Lweka/classifiers/Evaluation;")
        super(Evaluation, self).__init__(jobject)
    def crossvalidate_model(self, classifier, data, num_folds, rnd, output=None):
        """
        Crossvalidates the model using the specified data, number of folds and random number generator wrapper.

        :param classifier: the classifier to cross-validate
        :type classifier: Classifier
        :param data: the data to evaluate on
        :type data: Instances
        :param num_folds: the number of folds
        :type num_folds: int
        :param rnd: the random number generator to use
        :type rnd: Random
        :param output: the output generator to use
        :type output: PredictionOutput
        """
        # the Java varargs parameter expects an Object array; empty list means
        # no prediction output is generated
        if output is None:
            generator = []
        else:
            generator = [output.jobject]
        javabridge.call(
            self.jobject, "crossValidateModel",
            "(Lweka/classifiers/Classifier;Lweka/core/Instances;ILjava/util/Random;[Ljava/lang/Object;)V",
            classifier.jobject, data.jobject, num_folds, rnd.jobject, generator)
def evaluate_train_test_split(self, classifier, data, percentage, rnd=None, output=None):
"""
Splits the data into train and test, builds the classifier with the training data and
evaluates it against the test set.
:param classifier: the classifier to cross-validate
:type classifier: Classifier
:param data: the data to evaluate on
:type data: Instances
:param percentage: the percentage split to use (amount to use for training)
:type percentage: double
:param rnd: the random number generator to use, if None the order gets preserved
:type rnd: Random
:param output: the output generator to use
:type output: PredictionOutput
"""
train_inst, test_inst = data.train_test_split(percentage, rnd=rnd)
cls = Classifier.make_copy(classifier)
cls.build_classifier(train_inst)
self.test_model(cls, test_inst, output=output)
def test_model(self, classifier, data, output=None):
"""
Evaluates the built model using the specified test data and returns the classifications.
:param classifier: the trained classifier to evaluate
:type classifier: Classifier
:param data: the data to evaluate on
:type data: Instances
:param output: the output generator to use
:type output: PredictionOutput
:return: the classifications
:rtype: ndarray
"""
if output is None:
generator = []
else:
output.header = data
generator = [output.jobject]
cls = javabridge.call(
self.jobject, "evaluateModel",
"(Lweka/classifiers/Classifier;Lweka/core/Instances;[Ljava/lang/Object;)[D",
classifier.jobject, data.jobject, generator)
if cls is None:
return None
else:
return javabridge.get_env().get_double_array_elements(cls)
def test_model_once(self, classifier, inst):
"""
Evaluates the built model using the specified test instance and returns the classification.
:param classifier: the classifier to cross-validate
:type classifier: Classifier
:param inst: the Instance to evaluate on
:type inst: Instances
:return: the classification
:rtype: float
"""
return javabridge.call(
self.jobject, "evaluateModelOnce",
"(Lweka/classifiers/Classifier;Lweka/core/Instance;)D",
classifier.jobject, inst.jobject)
def summary(self, title=None, complexity=False):
"""
Generates a summary.
:param title: optional title
:type title: str
:param complexity: whether to print the complexity information as well
:type complexity: bool
:return: the summary
:rtype: str
"""
if title is None:
return javabridge.call(
self.jobject, "toSummaryString", "()Ljava/lang/String;")
else:
return javabridge.call(
self.jobject, "toSummaryString", "(Ljava/lang/String;Z)Ljava/lang/String;", title, complexity)
def class_details(self, title=None):
"""
Generates the class details.
:param title: optional title
:type title: str
:return: the details
:rtype: str
"""
if title is None:
return javabridge.call(
self.jobject, "toClassDetailsString", "()Ljava/lang/String;")
else:
return javabridge.call(
self.jobject, "toClassDetailsString", "(Ljava/lang/String;)Ljava/lang/String;", title)
def matrix(self, title=None):
"""
Generates the confusion matrix.
:param title: optional title
:type title: str
:return: the matrix
:rtype: str
"""
if title is None:
return javabridge.call(self.jobject, "toMatrixString", "()Ljava/lang/String;")
else:
return javabridge.call(self.jobject, "toMatrixString", "(Ljava/lang/String;)Ljava/lang/String;", title)
def cumulative_margin_distribution(self):
"""
Output the cumulative margin distribution as a string suitable for input for gnuplot or similar package.
:return: the cumulative margin distribution
:rtype: str
"""
return javabridge.call(self.jobject, "toCumulativeMarginDistributionString", "()Ljava/lang/String;")
def area_under_prc(self, class_index):
"""
Returns the area under precision recall curve.
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the area
:rtype: float
"""
return javabridge.call(self.jobject, "areaUnderPRC", "(I)D", class_index)
@property
def weighted_area_under_prc(self):
"""
Returns the weighted area under precision recall curve.
:return: the weighted area
:rtype: float
"""
return javabridge.call(self.jobject, "weightedAreaUnderPRC", "()D")
def area_under_roc(self, class_index):
"""
Returns the area under receiver operators characteristics curve.
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the area
:rtype: float
"""
return javabridge.call(self.jobject, "areaUnderROC", "(I)D", class_index)
@property
def weighted_area_under_roc(self):
"""
Returns the weighted area under receiver operator characteristic curve.
:return: the weighted area
:rtype: float
"""
return javabridge.call(self.jobject, "weightedAreaUnderROC", "()D")
@property
def avg_cost(self):
"""
Returns the average cost.
:return: the cost
:rtype: float
"""
return javabridge.call(self.jobject, "avgCost", "()D")
@property
def total_cost(self):
"""
Returns the total cost.
:return: the cost
:rtype: float
"""
return javabridge.call(self.jobject, "totalCost", "()D")
@property
def confusion_matrix(self):
"""
Returns the confusion matrix.
:return: the matrix
:rtype: ndarray
"""
return arrays.double_matrix_to_ndarray(javabridge.call(self.jobject, "confusionMatrix", "()[[D"))
@property
def correct(self):
"""
Returns the correct count (nominal classes).
:return: the count
:rtype: float
"""
return javabridge.call(self.jobject, "correct", "()D")
@property
def incorrect(self):
"""
Returns the incorrect count (nominal classes).
:return: the count
:rtype: float
"""
return javabridge.call(self.jobject, "incorrect", "()D")
@property
def unclassified(self):
"""
Returns the unclassified count.
:return: the count
:rtype: float
"""
return javabridge.call(self.jobject, "unclassified", "()D")
@property
def num_instances(self):
"""
Returns the number of instances that had a known class value.
:return: the number of instances
:rtype: float
"""
return javabridge.call(self.jobject, "numInstances", "()D")
@property
def percent_correct(self):
"""
Returns the percent correct (nominal classes).
:return: the percentage
:rtype: float
"""
return javabridge.call(self.jobject, "pctCorrect", "()D")
@property
def percent_incorrect(self):
"""
Returns the percent incorrect (nominal classes).
:return: the percentage
:rtype: float
"""
return javabridge.call(self.jobject, "pctIncorrect", "()D")
@property
def percent_unclassified(self):
"""
Returns the percent unclassified.
:return: the percentage
:rtype: float
"""
return javabridge.call(self.jobject, "pctUnclassified", "()D")
@property
def correlation_coefficient(self):
"""
Returns the correlation coefficient (numeric classes).
:return: the coefficient
:rtype: float
"""
return javabridge.call(self.jobject, "correlationCoefficient", "()D")
def matthews_correlation_coefficient(self, class_index):
"""
Returns the Matthews correlation coefficient (nominal classes).
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the coefficient
:rtype: float
"""
return javabridge.call(self.jobject, "matthewsCorrelationCoefficient", "(I)D", class_index)
@property
def weighted_matthews_correlation(self):
"""
Returns the weighted Matthews correlation (nominal classes).
:return: the correlation
:rtype: float
"""
return javabridge.call(self.jobject, "weightedMatthewsCorrelation", "()D")
@property
def coverage_of_test_cases_by_predicted_regions(self):
"""
Returns the coverage of the test cases by the predicted regions at the confidence level
specified when evaluation was performed.
:return: the coverage
:rtype: float
"""
return javabridge.call(self.jobject, "coverageOfTestCasesByPredictedRegions", "()D")
@property
def size_of_predicted_regions(self):
"""
Returns the average size of the predicted regions, relative to the range of the target in the
training data, at the confidence level specified when evaluation was performed.
:return:the size of the regions
:rtype: float
"""
return javabridge.call(self.jobject, "sizeOfPredictedRegions", "()D")
@property
def error_rate(self):
"""
Returns the error rate (numeric classes).
:return: the rate
:rtype: float
"""
return javabridge.call(self.jobject, "errorRate", "()D")
@property
def mean_absolute_error(self):
"""
Returns the mean absolute error.
:return: the error
:rtype: float
"""
return javabridge.call(self.jobject, "meanAbsoluteError", "()D")
@property
def relative_absolute_error(self):
"""
Returns the relative absolute error.
:return: the error
:rtype: float
"""
return javabridge.call(self.jobject, "relativeAbsoluteError", "()D")
@property
def root_mean_squared_error(self):
"""
Returns the root mean squared error.
:return: the error
:rtype: float
"""
return javabridge.call(self.jobject, "rootMeanSquaredError", "()D")
@property
def root_relative_squared_error(self):
"""
Returns the root relative squared error.
:return: the error
:rtype: float
"""
return javabridge.call(self.jobject, "rootRelativeSquaredError", "()D")
@property
def root_mean_prior_squared_error(self):
"""
Returns the root mean prior squared error.
:return: the error
:rtype: float
"""
return javabridge.call(self.jobject, "rootMeanPriorSquaredError", "()D")
@property
def mean_prior_absolute_error(self):
"""
Returns the mean prior absolute error.
:return: the error
:rtype: float
"""
return javabridge.call(self.jobject, "meanPriorAbsoluteError", "()D")
def false_negative_rate(self, class_index):
"""
Returns the false negative rate.
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the rate
:rtype: float
"""
return javabridge.call(self.jobject, "falseNegativeRate", "(I)D", class_index)
@property
def weighted_false_negative_rate(self):
"""
Returns the weighted false negative rate.
:return: the rate
:rtype: float
"""
return javabridge.call(self.jobject, "weightedFalseNegativeRate", "()D")
def false_positive_rate(self, class_index):
"""
Returns the false positive rate.
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the rate
:rtype: float
"""
return javabridge.call(self.jobject, "falsePositiveRate", "(I)D", class_index)
@property
def weighted_false_positive_rate(self):
"""
Returns the weighted false positive rate.
:return: the rate
:rtype: float
"""
return javabridge.call(self.jobject, "weightedFalsePositiveRate", "()D")
def num_false_negatives(self, class_index):
"""
Returns the number of false negatives.
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the count
:rtype: float
"""
return javabridge.call(self.jobject, "numFalseNegatives", "(I)D", class_index)
def true_negative_rate(self, class_index):
"""
Returns the true negative rate.
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the rate
:rtype: float
"""
return javabridge.call(self.jobject, "trueNegativeRate", "(I)D", class_index)
@property
def weighted_true_negative_rate(self):
"""
Returns the weighted true negative rate.
:return: the rate
:rtype: float
"""
return javabridge.call(self.jobject, "weightedTrueNegativeRate", "()D")
def num_true_negatives(self, class_index):
"""
Returns the number of true negatives.
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the count
:rtype: float
"""
return javabridge.call(self.jobject, "numTrueNegatives", "(I)D", class_index)
def num_false_positives(self, class_index):
"""
Returns the number of false positives.
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the count
:rtype: float
"""
return javabridge.call(self.jobject, "numFalsePositives", "(I)D", class_index)
def true_positive_rate(self, class_index):
"""
Returns the true positive rate.
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the rate
:rtype: float
"""
return javabridge.call(self.jobject, "truePositiveRate", "(I)D", class_index)
@property
def weighted_true_positive_rate(self):
"""
Returns the weighted true positive rate.
:return: the rate
:rtype: float
"""
return javabridge.call(self.jobject, "weightedTruePositiveRate", "()D")
def num_true_positives(self, class_index):
"""
Returns the number of true positives.
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the count
:rtype: float
"""
return javabridge.call(self.jobject, "numTruePositives", "(I)D", class_index)
def f_measure(self, class_index):
"""
Returns the f measure.
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the measure
:rtype: float
"""
return javabridge.call(self.jobject, "fMeasure", "(I)D", class_index)
@property
def weighted_f_measure(self):
"""
Returns the weighted f measure.
:return: the measure
:rtype: float
"""
return javabridge.call(self.jobject, "weightedFMeasure", "()D")
@property
def unweighted_macro_f_measure(self):
"""
Returns the unweighted macro-averaged F-measure.
:return: the measure
:rtype: float
"""
return javabridge.call(self.jobject, "unweightedMacroFmeasure", "()D")
@property
def unweighted_micro_f_measure(self):
"""
Returns the unweighted micro-averaged F-measure.
:return: the measure
:rtype: float
"""
return javabridge.call(self.jobject, "unweightedMicroFmeasure", "()D")
def precision(self, class_index):
"""
Returns the precision.
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the precision
:rtype: float
"""
return javabridge.call(self.jobject, "precision", "(I)D", class_index)
@property
def weighted_precision(self):
"""
Returns the weighted precision.
:return: the precision
:rtype: float
"""
return javabridge.call(self.jobject, "weightedPrecision", "()D")
def recall(self, class_index):
"""
Returns the recall.
:param class_index: the 0-based index of the class label
:type class_index: int
:return: the recall
:rtype: float
"""
return javabridge.call(self.jobject, "recall", "(I)D", class_index)
@property
def weighted_recall(self):
"""
Returns the weighted recall.
:return: the recall
:rtype: float
"""
return javabridge.call(self.jobject, "weightedRecall", "()D")
@property
def kappa(self):
"""
Returns kappa.
:return: kappa
:rtype: float
"""
return javabridge.call(self.jobject, "kappa", "()D")
@property
def kb_information(self):
"""
Returns KB information.
:return: the information
:rtype: float
"""
return javabridge.call(self.jobject, "KBInformation", "()D")
@property
def kb_mean_information(self):
"""
Returns KB mean information.
:return: the information
:rtype: float
"""
return javabridge.call(self.jobject, "KBMeanInformation", "()D")
@property
def kb_relative_information(self):
"""
Returns KB relative information.
:return: the information
:rtype: float
"""
return javabridge.call(self.jobject, "KBRelativeInformation", "()D")
@property
def sf_entropy_gain(self):
"""
Returns the total SF, which is the null model entropy minus the scheme entropy.
:return: the gain
:rtype: float
"""
return javabridge.call(self.jobject, "SFEntropyGain", "()D")
@property
def sf_mean_entropy_gain(self):
"""
Returns the SF per instance, which is the null model entropy minus the scheme entropy, per instance.
:return: the gain
:rtype: float
"""
return javabridge.call(self.jobject, "SFMeanEntropyGain", "()D")
@property
def sf_mean_prior_entropy(self):
"""
Returns the entropy per instance for the null model.
:return: the entropy
:rtype: float
"""
return javabridge.call(self.jobject, "SFMeanPriorEntropy", "()D")
@property
def sf_mean_scheme_entropy(self):
"""
Returns the entropy per instance for the scheme.
:return: the entropy
:rtype: float
"""
return javabridge.call(self.jobject, "SFMeanSchemeEntropy", "()D")
@property
def sf_prior_entropy(self):
"""
Returns the total entropy for the null model.
:return: the entropy
:rtype: float
"""
return javabridge.call(self.jobject, "SFPriorEntropy", "()D")
@property
def sf_scheme_entropy(self):
"""
Returns the total entropy for the scheme.
:return: the entropy
:rtype: float
"""
return javabridge.call(self.jobject, "SFchemeEntropy", "()D")
@property
def class_priors(self):
"""
Returns the class priors.
:return: the priors
:rtype: ndarray
"""
return javabridge.get_env().get_double_array_elements(javabridge.call(self.jobject, "getClassPriors", "()[D"))
@class_priors.setter
def class_priors(self, data):
"""
Sets the class priors derived from the dataset.
:param data: the dataset to derive the priors from
:type data: Instances
"""
javabridge.call(self.jobject, "setClassPriors", "(Lweka/core/Instances;)V", data)
@property
def header(self):
"""
Returns the header format.
:return: the header format
:rtype: Instances
"""
return Instances(javabridge.call(self.jobject, "getHeader", "()Lweka/core/Instances;"))
@property
def discard_predictions(self):
"""
Returns whether to discard predictions (saves memory).
:return: True if to discard
:rtype: bool
"""
return javabridge.call(self.jobject, "getDiscardPredictions", "()Z")
@discard_predictions.setter
def discard_predictions(self, discard):
"""
Sets whether to discard predictions (saves memory).
:param discard: True if to discard predictions
:type discard: bool
"""
javabridge.call(self.jobject, "setDiscardPredictions", "(Z)V", discard)
@property
def predictions(self):
"""
Returns the predictions.
:return: the predictions. None if not available
:rtype: list
"""
preds = javabridge.get_collection_wrapper(
javabridge.call(self.jobject, "predictions", "()Ljava/util/ArrayList;"))
if self.discard_predictions:
result = None
else:
result = []
for pred in preds:
if is_instance_of(pred, "weka.classifiers.evaluation.NominalPrediction"):
result.append(NominalPrediction(pred))
elif is_instance_of(pred, "weka.classifiers.evaluation.NumericPrediction"):
result.append(NumericPrediction(pred))
else:
result.append(Prediction(pred))
return result
@classmethod
def evaluate_model(cls, classifier, args):
"""
Evaluates the classifier with the given options.
:param classifier: the classifier instance to use
:type classifier: Classifier
:param args: the command-line arguments to use
:type args: list
:return: the evaluation string
:rtype: str
"""
return javabridge.static_call(
"Lweka/classifiers/Evaluation;", "evaluateModel",
"(Lweka/classifiers/Classifier;[Ljava/lang/String;)Ljava/lang/String;",
classifier.jobject, args)
class PredictionOutput(OptionHandler):
"""
For collecting predictions and generating output from.
Must be derived from weka.classifiers.evaluation.output.prediction.AbstractOutput
"""
def __init__(self, classname="weka.classifiers.evaluation.output.prediction.PlainText", jobject=None, options=None):
"""
Initializes the specified output generator using either the classname or the supplied JB_Object.
:param classname: the classname of the generator
:type classname: str
:param jobject: the JB_Object to use
:type jobject: JB_Object
:param options: the list of commandline options to set
:type options: list
"""
if jobject is None:
jobject = PredictionOutput.new_instance(classname)
self.enforce_type(jobject, "weka.classifiers.evaluation.output.prediction.AbstractOutput")
super(PredictionOutput, self).__init__(jobject=jobject, options=options)
buf = javabridge.make_instance("java/lang/StringBuffer", "()V")
javabridge.call(self.jobject, "setBuffer", "(Ljava/lang/StringBuffer;)V", buf)
@property
def header(self):
"""
Returns the header format.
:return: The dataset format
:rtype: Instances
"""
return Instances(javabridge.call(self.jobject, "getHeader", "()Lweka/core/Instances;"))
@header.setter
def header(self, data):
"""
Sets the header format.
:param data: The dataset format
:type data: Instances
"""
javabridge.call(self.jobject, "setHeader", "(Lweka/core/Instances;)V", data.jobject)
def print_header(self):
"""
Prints the header to the buffer.
"""
javabridge.call(self.jobject, "printHeader", "()V")
def print_footer(self):
"""
Prints the footer to the buffer.
"""
javabridge.call(self.jobject, "printFooter", "()V")
def print_all(self, cls, data):
"""
Prints the header, classifications and footer to the buffer.
:param cls: the classifier
:type cls: Classifier
:param data: the test data
:type data: Instances
"""
javabridge.call(
self.jobject, "print", "(Lweka/classifiers/Classifier;Lweka/core/Instances;)V",
cls.jobject, data.jobject)
def print_classifications(self, cls, data):
"""
Prints the classifications to the buffer.
:param cls: the classifier
:type cls: Classifier
:param data: the test data
:type data: Instances
"""
javabridge.call(
self.jobject, "printClassifications", "(Lweka/classifiers/Classifier;Lweka/core/Instances;)V",
cls.jobject, data.jobject)
def print_classification(self, cls, inst, index):
"""
Prints the classification to the buffer.
:param cls: the classifier
:type cls: Classifier
:param inst: the test instance
:type inst: Instance
:param index: the 0-based index of the test instance
:type index: int
"""
javabridge.call(
self.jobject, "printClassification", "(Lweka/classifiers/Classifier;Lweka/core/Instance;I)V",
cls.jobject, inst.jobject, index)
def buffer_content(self):
"""
Returns the content of the buffer as string.
:return: The buffer content
:rtype: str
"""
return javabridge.to_string(javabridge.call(self.jobject, "getBuffer", "()Ljava/lang/StringBuffer;"))
def __str__(self):
"""
Returns the content of the buffer.
:return: the current buffer content
:rtype: str
"""
return self.buffer_content()
def predictions_to_instances(data, preds):
    """
    Converts a list of predictions into an Instances object.

    The generated dataset has "index", "weight", "actual" and "predicted"
    columns; nominal predictions additionally get an "error" flag, the
    classification confidence and one column per class distribution value.

    :param data: the original dataset format
    :type data: Instances
    :param preds: the predictions to convert
    :type preds: list
    :return: the predictions as a dataset, None if no predictions present
    :rtype: Instances
    """
    if len(preds) == 0:
        return None

    numeric = isinstance(preds[0], NumericPrediction)

    # assemble the header: index/weight are common to both prediction kinds
    atts = [Attribute.create_numeric("index"), Attribute.create_numeric("weight")]
    if numeric:
        atts.append(Attribute.create_numeric("actual"))
        atts.append(Attribute.create_numeric("predicted"))
        atts.append(Attribute.create_numeric("error"))
    else:
        atts.append(data.class_attribute.copy(name="actual"))
        atts.append(data.class_attribute.copy(name="predicted"))
        atts.append(Attribute.create_nominal("error", ["no", "yes"]))
        atts.append(Attribute.create_numeric("classification"))
        for idx in xrange(data.class_attribute.num_values):
            atts.append(Attribute.create_numeric("distribution-" + data.class_attribute.value(idx)))

    result = Instances.create_instances("Predictions", atts, len(preds))
    # rows are numbered starting at 1, matching the original counter behavior
    for row, pred in enumerate(preds, 1):
        if numeric:
            values = array([row, pred.weight, pred.actual, pred.predicted, pred.error])
        else:
            # nominal "error": 0.0 -> "no", 1.0 -> "yes"
            err = 0.0 if pred.actual == pred.predicted else 1.0
            fields = [row, pred.weight, pred.actual, pred.predicted, err, max(pred.distribution)]
            for idx in xrange(data.class_attribute.num_values):
                fields.append(pred.distribution[idx])
            values = array(fields)
        result.add_instance(Instance.create_instance(values))
    return result
if __name__ == "__main__":
    try:
        main()
    except Exception as ex:
        # "except Exception, ex" is Python-2-only syntax; the "as" form
        # (PEP 3110) works on Python 2.6+ and 3.x alike.
        print(ex)
|
fracpete/python-weka-wrapper | python/weka/classifiers.py | Classifier.distributions_for_instances | python | def distributions_for_instances(self, data):
if self.is_batchpredictor:
return arrays.double_matrix_to_ndarray(self.__distributions(data.jobject))
else:
return None | Performs predictions, returning the class distributions.
:param data: the Instances to get the class distributions for
:type data: Instances
:return: the class distribution matrix, None if not a batch predictor
:rtype: ndarray | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/classifiers.py#L120-L132 | null | class Classifier(OptionHandler):
"""
Wrapper class for classifiers.
"""
def __init__(self, classname="weka.classifiers.rules.ZeroR", jobject=None, options=None):
"""
Initializes the specified classifier using either the classname or the supplied JB_Object.
:param classname: the classname of the classifier
:type classname: str
:param jobject: the JB_Object to use
:type jobject: JB_Object
:param options: the list of commandline options to set
:type options: list
"""
if jobject is None:
jobject = Classifier.new_instance(classname)
self.enforce_type(jobject, "weka.classifiers.Classifier")
self.is_updateable = self.check_type(jobject, "weka.classifiers.UpdateableClassifier")
self.is_drawable = self.check_type(jobject, "weka.core.Drawable")
self.is_batchpredictor = self.check_type(jobject, "weka.core.BatchPredictor")
super(Classifier, self).__init__(jobject=jobject, options=options)
self.__classify = javabridge.make_call(self.jobject, "classifyInstance", "(Lweka/core/Instance;)D")
self.__distribution = javabridge.make_call(self.jobject, "distributionForInstance", "(Lweka/core/Instance;)[D")
if self.is_batchpredictor:
self.__distributions = javabridge.make_call(
self.jobject, "distributionsForInstances", "(Lweka/core/Instances;)[[D")
@property
def capabilities(self):
"""
Returns the capabilities of the classifier.
:return: the capabilities
:rtype: Capabilities
"""
return Capabilities(javabridge.call(self.jobject, "getCapabilities", "()Lweka/core/Capabilities;"))
def build_classifier(self, data):
"""
Builds the classifier with the data.
:param data: the data to train the classifier with
:type data: Instances
"""
javabridge.call(self.jobject, "buildClassifier", "(Lweka/core/Instances;)V", data.jobject)
def update_classifier(self, inst):
"""
Updates the classifier with the instance.
:param inst: the Instance to update the classifier with
:type inst: Instance
"""
if self.is_updateable:
javabridge.call(self.jobject, "updateClassifier", "(Lweka/core/Instance;)V", inst.jobject)
else:
logger.critical(classes.get_classname(self.jobject) + " is not updateable!")
    def classify_instance(self, inst):
        """
        Performs a prediction for a single instance.

        :param inst: the Instance to get a prediction for
        :type inst: Instance
        :return: the classification (either regression value or 0-based label index)
        :rtype: float
        """
        # __classify is the javabridge method handle pre-bound in __init__
        # to classifyInstance(Instance), avoiding per-call method lookup.
        return self.__classify(inst.jobject)
    def distribution_for_instance(self, inst):
        """
        Performs a prediction, returning the class distribution.

        :param inst: the Instance to get the class distribution for
        :type inst: Instance
        :return: the class distribution array
        :rtype: ndarray
        """
        # __distribution is the javabridge method handle pre-bound in __init__
        # to distributionForInstance(Instance).
        pred = self.__distribution(inst.jobject)
        # unwrap the returned Java double[] into a Python-accessible array
        return javabridge.get_env().get_double_array_elements(pred)
@property
def batch_size(self):
"""
Returns the batch size, in case this classifier is a batch predictor.
:return: the batch size, None if not a batch predictor
:rtype: str
"""
if self.is_batchpredictor:
return javabridge.call(self.jobject, "getBatchSize", "()Ljava/lang/String;")
else:
return None
@batch_size.setter
def batch_size(self, size):
"""
Sets the batch size, in case this classifier is a batch predictor.
:param size: the size of the batch
:type size: str
"""
if self.is_batchpredictor:
javabridge.call(self.jobject, "setBatchSize", "(Ljava/lang/String;)V", size)
def has_efficient_batch_prediction(self):
"""
Returns whether the classifier implements a more efficient batch prediction.
:return: True if a more efficient batch prediction is implemented, always False if not batch predictor
:rtype: bool
"""
if self.is_batchpredictor:
return javabridge.call(self.jobject, "implementsMoreEfficientBatchPrediction", "()Z")
else:
return False
@property
def graph_type(self):
"""
Returns the graph type if classifier implements weka.core.Drawable, otherwise -1.
:return: the type
:rtype: int
"""
if self.is_drawable:
return javabridge.call(self.jobject, "graphType", "()I")
else:
return -1
@property
def graph(self):
"""
Returns the graph if classifier implements weka.core.Drawable, otherwise None.
:return: the generated graph string
:rtype: str
"""
if self.is_drawable:
return javabridge.call(self.jobject, "graph", "()Ljava/lang/String;")
else:
return None
def to_source(self, classname):
"""
Returns the model as Java source code if the classifier implements weka.classifiers.Sourcable.
:param classname: the classname for the generated Java code
:type classname: str
:return: the model as source code string
:rtype: str
"""
if not self.check_type(self.jobject, "weka.classifiers.Sourcable"):
return None
return javabridge.call(self.jobject, "toSource", "(Ljava/lang/String;)Ljava/lang/String;", classname)
@classmethod
def make_copy(cls, classifier):
"""
Creates a copy of the classifier.
:param classifier: the classifier to copy
:type classifier: Classifier
:return: the copy of the classifier
:rtype: Classifier
"""
return Classifier(
jobject=javabridge.static_call(
"weka/classifiers/AbstractClassifier", "makeCopy",
"(Lweka/classifiers/Classifier;)Lweka/classifiers/Classifier;", classifier.jobject))
|
fracpete/python-weka-wrapper | python/weka/core/dataset.py | Instances.values | python | def values(self, index):
values = []
for i in xrange(self.num_instances):
inst = self.get_instance(i)
values.append(inst.get_value(index))
return numpy.array(values) | Returns the internal values of this attribute from all the instance objects.
:return: the values as numpy array
:rtype: list | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/dataset.py#L144-L155 | null | class Instances(JavaObject):
"""
Wrapper class for weka.core.Instances.
"""
def __init__(self, jobject):
"""
Initializes the weka.core.Instances wrapper.
:param jobject: the weka.core.Instances object to wrap
:type jobject: JB_Object
"""
self.enforce_type(jobject, "weka.core.Instances")
super(Instances, self).__init__(jobject)
self.__attribute = javabridge.make_call(self.jobject, "attribute", "(I)Lweka/core/Attribute;")
self.__attribute_by_name = javabridge.make_call(self.jobject, "attribute", "(Ljava/lang/String;)Lweka/core/Attribute;")
self.__num_attributes = javabridge.make_call(self.jobject, "numAttributes", "()I")
self.__num_instances = javabridge.make_call(self.jobject, "numInstances", "()I")
self.__get_class_index = javabridge.make_call(self.jobject, "classIndex", "()I")
self.__set_class_index = javabridge.make_call(self.jobject, "setClassIndex", "(I)V")
self.__class_attribute = javabridge.make_call(self.jobject, "classAttribute", "()Lweka/core/Attribute;")
self.__get_instance = javabridge.make_call(self.jobject, "instance", "(I)Lweka/core/Instance;")
self.__set_instance = javabridge.make_call(self.jobject, "set", "(ILweka/core/Instance;)Lweka/core/Instance;")
self.__append_instance = javabridge.make_call(self.jobject, "add", "(Lweka/core/Instance;)Z")
self.__insert_instance = javabridge.make_call(self.jobject, "add", "(ILweka/core/Instance;)V")
def __iter__(self):
"""
Allows iterating over the rows.
:return: the iterator
:rtype: InstanceIterator
"""
return InstanceIterator(self)
def __len__(self):
"""
Returns the number of rows in the dataset.
:return: the number of Instance objects
:rtype: int
"""
return self.num_instances
@property
def relationname(self):
"""
Returns the name of the dataset.
:return: the name
:rtype: str
"""
return javabridge.call(self.jobject, "relationName", "()Ljava/lang/String;")
@relationname.setter
def relationname(self, value):
"""
Sets the name of the dataset.
:param value: the name
:type value: str
"""
javabridge.call(self.jobject, "setRelationName", "(Ljava/lang/String;)V", value)
@property
def num_attributes(self):
"""
Returns the number of attributes.
:return: the number of attributes
:rtype: int
"""
return self.__num_attributes()
def attributes(self):
"""
Returns an iterator over the attributes.
"""
return AttributeIterator(self)
def attribute(self, index):
"""
Returns the specified attribute.
:param index: the 0-based index of the attribute
:type index: int
:return: the attribute
:rtype: Attribute
"""
return Attribute(self.__attribute(index))
def attribute_by_name(self, name):
"""
Returns the specified attribute, None if not found.
:param name: the name of the attribute
:type name: str
:return: the attribute or None
:rtype: Attribute
"""
att = self.__attribute_by_name(javabridge.get_env().new_string(name))
if att is None:
return None
else:
return Attribute(att)
def attribute_stats(self, index):
"""
Returns the specified attribute statistics.
:param index: the 0-based index of the attribute
:type index: int
:return: the attribute statistics
:rtype: AttributeStats
"""
return AttributeStats(javabridge.call(self.jobject, "attributeStats", "(I)Lweka/core/AttributeStats;", index))
@property
def num_instances(self):
"""
Returns the number of instances.
:return: the number of instances
:rtype: int
"""
return self.__num_instances()
@property
def class_attribute(self):
"""
Returns the currently set class attribute.
:return: the class attribute
:rtype: Attribute
"""
return Attribute(self.__class_attribute())
@property
def class_index(self):
"""
Returns the currently set class index (0-based).
:return: the class index, -1 if not set
:rtype: int
"""
return self.__get_class_index()
@class_index.setter
def class_index(self, index):
"""
Sets the class index (0-based).
:param index: the new index, use -1 to unset
:type index: int
"""
self.__set_class_index(index)
def has_class(self):
"""
Returns whether a class attribute is set (convenience method).
:return: whether a class attribute is currently set
:rtype: bool
"""
return self.class_index != -1
def no_class(self):
"""
Unsets the class attribute (convenience method).
"""
self.class_index = -1
def class_is_first(self):
"""
Sets the first attribute as class attribute (convenience method).
"""
self.class_index = 0
def class_is_last(self):
"""
Sets the last attribute as class attribute (convenience method).
"""
self.class_index = self.num_attributes - 1
def get_instance(self, index):
"""
Returns the Instance object at the specified location.
:param index: the 0-based index of the instance
:type index: int
:return: the instance
:rtype: Instance
"""
return Instance(self.__get_instance(index))
def add_instance(self, inst, index=None):
"""
Adds the specified instance to the dataset.
:param inst: the Instance to add
:type inst: Instance
:param index: the 0-based index where to add the Instance
:type index: int
"""
if index is None:
self.__append_instance(inst.jobject)
else:
self.__insert_instance(index, inst.jobject)
def set_instance(self, index, inst):
"""
Sets the Instance at the specified location in the dataset.
:param index: the 0-based index of the instance to replace
:type index: int
:param inst: the Instance to set
:type inst: Instance
:return: the instance
:rtype: Instance
"""
return Instance(
self.__set_instance(index, inst.jobject))
def delete(self, index=None):
"""
Removes either the specified Instance or all Instance objects.
:param index: the 0-based index of the instance to remove
:type index: int
"""
if index is None:
javabridge.call(self.jobject, "delete", "()V")
else:
javabridge.call(self.jobject, "delete", "(I)V", index)
def delete_attribute(self, index):
"""
Deletes an attribute at the given position.
:param index: the 0-based index of the attribute to remove
:type index: int
"""
javabridge.call(self.jobject, "deleteAttributeAt", "(I)V", index)
def delete_first_attribute(self):
"""
Deletes the first attribute.
"""
self.delete_attribute(0)
def delete_last_attribute(self):
"""
Deletes the last attribute.
"""
self.delete_attribute(self.num_attributes - 1)
def delete_attribute_type(self, typ):
"""
Deletes all attributes of the given type in the dataset.
:param typ: the attribute type to remove, see weka.core.Attribute Javadoc
:type typ: int
"""
javabridge.call(self.jobject, "deleteAttributeType", "(I)V", typ)
def delete_with_missing(self, index):
"""
Deletes all rows that have a missing value at the specified attribute index.
:param index: the attribute index to check for missing attributes
:type index: int
"""
javabridge.call(self.jobject, "deleteWithMissing", "(I)V", index)
def insert_attribute(self, att, index):
"""
Inserts the attribute at the specified location.
:param att: the attribute to insert
:type att: Attribute
:param index: the index to insert the attribute at
:type index: int
"""
javabridge.call(self.jobject, "insertAttributeAt", "(Lweka/core/Attribute;I)V", att.jobject, index)
def compactify(self):
"""
Compactifies the set of instances.
"""
javabridge.call(self.jobject, "compactify", "()V")
def sort(self, index):
"""
Sorts the dataset using the specified attribute index.
:param index: the index of the attribute
:type index: int
"""
javabridge.call(self.jobject, "sort", "(I)V", index)
def randomize(self, random):
"""
Randomizes the dataset using the random number generator.
:param random: the random number generator to use
:type random: Random
"""
javabridge.call(self.jobject, "randomize", "(Ljava/util/Random;)V", random.jobject)
def stratify(self, folds):
"""
Stratifies the data after randomization for nominal class attributes.
:param folds: the number of folds to perform the stratification for
:type folds: int
"""
javabridge.call(self.jobject, "stratify", "(I)V", folds)
def train_cv(self, num_folds, fold, random=None):
"""
Generates a training fold for cross-validation.
:param num_folds: the number of folds of cross-validation, eg 10
:type num_folds: int
:param fold: the current fold (0-based)
:type fold: int
:param random: the random number generator
:type random: Random
:return: the training fold
:rtype: Instances
"""
if random is None:
return Instances(
javabridge.call(self.jobject, "trainCV", "(II)Lweka/core/Instances;",
num_folds, fold))
else:
return Instances(
javabridge.call(self.jobject, "trainCV", "(IILjava/util/Random;)Lweka/core/Instances;",
num_folds, fold, random.jobject))
def test_cv(self, num_folds, fold):
"""
Generates a test fold for cross-validation.
:param num_folds: the number of folds of cross-validation, eg 10
:type num_folds: int
:param fold: the current fold (0-based)
:type fold: int
:return: the training fold
:rtype: Instances
"""
return Instances(
javabridge.call(self.jobject, "testCV", "(II)Lweka/core/Instances;",
num_folds, fold))
def equal_headers(self, inst):
"""
Compares this dataset against the given one in terms of attributes.
:param inst: the dataset to compare against
:type inst: Instances
:return: None if the same, otherwise an error message
:rtype: str
"""
return javabridge.call(
self.jobject, "equalHeadersMsg", "(Lweka/core/Instances;)Ljava/lang/String;", inst.jobject)
@classmethod
def copy_instances(cls, dataset, from_row=None, num_rows=None):
"""
Creates a copy of the Instances. If either from_row or num_rows are None, then all of
the data is being copied.
:param dataset: the original dataset
:type dataset: Instances
:param from_row: the 0-based start index of the rows to copy
:type from_row: int
:param num_rows: the number of rows to copy
:type num_rows: int
:return: the copy of the data
:rtype: Instances
"""
if from_row is None or num_rows is None:
return Instances(
javabridge.make_instance(
"weka/core/Instances", "(Lweka/core/Instances;)V",
dataset.jobject))
else:
dataset = cls.copy_instances(dataset)
return Instances(
javabridge.make_instance(
"weka/core/Instances", "(Lweka/core/Instances;II)V",
dataset.jobject, from_row, num_rows))
@classmethod
def template_instances(cls, dataset, capacity=0):
"""
Uses the Instances as template to create an empty dataset.
:param dataset: the original dataset
:type dataset: Instances
:param capacity: how many data rows to reserve initially (see compactify)
:type capacity: int
:return: the empty dataset
:rtype: Instances
"""
return Instances(
javabridge.make_instance(
"weka/core/Instances", "(Lweka/core/Instances;I)V", dataset.jobject, capacity))
@classmethod
def create_instances(cls, name, atts, capacity):
"""
Creates a new Instances.
:param name: the relation name
:type name: str
:param atts: the list of attributes to use for the dataset
:type atts: list of Attribute
:param capacity: how many data rows to reserve initially (see compactify)
:type capacity: int
:return: the dataset
:rtype: Instances
"""
attributes = []
for att in atts:
attributes.append(att.jobject)
return Instances(
javabridge.make_instance(
"weka/core/Instances", "(Ljava/lang/String;Ljava/util/ArrayList;I)V",
name, javabridge.make_list(attributes), capacity))
@classmethod
def merge_instances(cls, inst1, inst2):
"""
Merges the two datasets (side-by-side).
:param inst1: the first dataset
:type inst1: Instances or str
:param inst2: the first dataset
:type inst2: Instances
:return: the combined dataset
:rtype: Instances
"""
return Instances(javabridge.static_call(
"weka/core/Instances", "mergeInstances",
"(Lweka/core/Instances;Lweka/core/Instances;)Lweka/core/Instances;", inst1.jobject, inst2.jobject))
@classmethod
def append_instances(cls, inst1, inst2):
"""
Merges the two datasets (one-after-the-other). Throws an exception if the datasets aren't compatible.
:param inst1: the first dataset
:type inst1: Instances
:param inst2: the first dataset
:type inst2: Instances
:return: the combined dataset
:rtype: Instances
"""
msg = inst1.equal_headers(inst2)
if msg is not None:
raise Exception("Cannot appent instances: " + msg)
result = cls.copy_instances(inst1)
for i in xrange(inst2.num_instances):
result.add_instance(inst2.get_instance(i))
return result
def train_test_split(self, percentage, rnd=None):
"""
Generates a train/test split.
:param percentage: the percentage split to use (amount to use for training; 0-100)
:type percentage: double
:param rnd: the random number generator to use, if None the order gets preserved
:type rnd: Random
:return: the train/test splits
:rtype: tuple
"""
if percentage <= 0:
raise Exception("Split percentage must be > 0, provided: " + str(percentage))
if percentage >= 100:
raise Exception("Split percentage must be < 100, provided: " + str(percentage))
if rnd is not None:
self.randomize(rnd)
train_size = int(round(self.num_instances * percentage / 100))
test_size = self.num_instances - train_size
train_inst = Instances.copy_instances(self, 0, train_size)
test_inst = Instances.copy_instances(self, train_size, test_size)
return train_inst, test_inst
@classmethod
def summary(cls, inst):
"""
Generates a summary of the dataset.
:param inst: the dataset
:type inst: Instances
:return: the summary
:rtype: str
"""
return javabridge.call(inst.jobject, "toSummaryString", "()Ljava/lang/String;")
|
fracpete/python-weka-wrapper | python/weka/core/dataset.py | Instances.append_instances | python | def append_instances(cls, inst1, inst2):
msg = inst1.equal_headers(inst2)
if msg is not None:
raise Exception("Cannot appent instances: " + msg)
result = cls.copy_instances(inst1)
for i in xrange(inst2.num_instances):
result.add_instance(inst2.get_instance(i))
return result | Merges the two datasets (one-after-the-other). Throws an exception if the datasets aren't compatible.
:param inst1: the first dataset
:type inst1: Instances
:param inst2: the first dataset
:type inst2: Instances
:return: the combined dataset
:rtype: Instances | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/dataset.py#L489-L506 | null | class Instances(JavaObject):
"""
Wrapper class for weka.core.Instances.
"""
def __init__(self, jobject):
"""
Initializes the weka.core.Instances wrapper.
:param jobject: the weka.core.Instances object to wrap
:type jobject: JB_Object
"""
self.enforce_type(jobject, "weka.core.Instances")
super(Instances, self).__init__(jobject)
self.__attribute = javabridge.make_call(self.jobject, "attribute", "(I)Lweka/core/Attribute;")
self.__attribute_by_name = javabridge.make_call(self.jobject, "attribute", "(Ljava/lang/String;)Lweka/core/Attribute;")
self.__num_attributes = javabridge.make_call(self.jobject, "numAttributes", "()I")
self.__num_instances = javabridge.make_call(self.jobject, "numInstances", "()I")
self.__get_class_index = javabridge.make_call(self.jobject, "classIndex", "()I")
self.__set_class_index = javabridge.make_call(self.jobject, "setClassIndex", "(I)V")
self.__class_attribute = javabridge.make_call(self.jobject, "classAttribute", "()Lweka/core/Attribute;")
self.__get_instance = javabridge.make_call(self.jobject, "instance", "(I)Lweka/core/Instance;")
self.__set_instance = javabridge.make_call(self.jobject, "set", "(ILweka/core/Instance;)Lweka/core/Instance;")
self.__append_instance = javabridge.make_call(self.jobject, "add", "(Lweka/core/Instance;)Z")
self.__insert_instance = javabridge.make_call(self.jobject, "add", "(ILweka/core/Instance;)V")
def __iter__(self):
"""
Allows iterating over the rows.
:return: the iterator
:rtype: InstanceIterator
"""
return InstanceIterator(self)
def __len__(self):
"""
Returns the number of rows in the dataset.
:return: the number of Instance objects
:rtype: int
"""
return self.num_instances
@property
def relationname(self):
"""
Returns the name of the dataset.
:return: the name
:rtype: str
"""
return javabridge.call(self.jobject, "relationName", "()Ljava/lang/String;")
@relationname.setter
def relationname(self, value):
"""
Sets the name of the dataset.
:param value: the name
:type value: str
"""
javabridge.call(self.jobject, "setRelationName", "(Ljava/lang/String;)V", value)
@property
def num_attributes(self):
"""
Returns the number of attributes.
:return: the number of attributes
:rtype: int
"""
return self.__num_attributes()
def attributes(self):
"""
Returns an iterator over the attributes.
"""
return AttributeIterator(self)
def attribute(self, index):
"""
Returns the specified attribute.
:param index: the 0-based index of the attribute
:type index: int
:return: the attribute
:rtype: Attribute
"""
return Attribute(self.__attribute(index))
def attribute_by_name(self, name):
"""
Returns the specified attribute, None if not found.
:param name: the name of the attribute
:type name: str
:return: the attribute or None
:rtype: Attribute
"""
att = self.__attribute_by_name(javabridge.get_env().new_string(name))
if att is None:
return None
else:
return Attribute(att)
def attribute_stats(self, index):
"""
Returns the specified attribute statistics.
:param index: the 0-based index of the attribute
:type index: int
:return: the attribute statistics
:rtype: AttributeStats
"""
return AttributeStats(javabridge.call(self.jobject, "attributeStats", "(I)Lweka/core/AttributeStats;", index))
def values(self, index):
"""
Returns the internal values of this attribute from all the instance objects.
:return: the values as numpy array
:rtype: list
"""
values = []
for i in xrange(self.num_instances):
inst = self.get_instance(i)
values.append(inst.get_value(index))
return numpy.array(values)
@property
def num_instances(self):
"""
Returns the number of instances.
:return: the number of instances
:rtype: int
"""
return self.__num_instances()
@property
def class_attribute(self):
"""
Returns the currently set class attribute.
:return: the class attribute
:rtype: Attribute
"""
return Attribute(self.__class_attribute())
@property
def class_index(self):
"""
Returns the currently set class index (0-based).
:return: the class index, -1 if not set
:rtype: int
"""
return self.__get_class_index()
@class_index.setter
def class_index(self, index):
"""
Sets the class index (0-based).
:param index: the new index, use -1 to unset
:type index: int
"""
self.__set_class_index(index)
def has_class(self):
"""
Returns whether a class attribute is set (convenience method).
:return: whether a class attribute is currently set
:rtype: bool
"""
return self.class_index != -1
def no_class(self):
"""
Unsets the class attribute (convenience method).
"""
self.class_index = -1
def class_is_first(self):
"""
Sets the first attribute as class attribute (convenience method).
"""
self.class_index = 0
def class_is_last(self):
"""
Sets the last attribute as class attribute (convenience method).
"""
self.class_index = self.num_attributes - 1
def get_instance(self, index):
"""
Returns the Instance object at the specified location.
:param index: the 0-based index of the instance
:type index: int
:return: the instance
:rtype: Instance
"""
return Instance(self.__get_instance(index))
def add_instance(self, inst, index=None):
"""
Adds the specified instance to the dataset.
:param inst: the Instance to add
:type inst: Instance
:param index: the 0-based index where to add the Instance
:type index: int
"""
if index is None:
self.__append_instance(inst.jobject)
else:
self.__insert_instance(index, inst.jobject)
def set_instance(self, index, inst):
"""
Sets the Instance at the specified location in the dataset.
:param index: the 0-based index of the instance to replace
:type index: int
:param inst: the Instance to set
:type inst: Instance
:return: the instance
:rtype: Instance
"""
return Instance(
self.__set_instance(index, inst.jobject))
def delete(self, index=None):
"""
Removes either the specified Instance or all Instance objects.
:param index: the 0-based index of the instance to remove
:type index: int
"""
if index is None:
javabridge.call(self.jobject, "delete", "()V")
else:
javabridge.call(self.jobject, "delete", "(I)V", index)
def delete_attribute(self, index):
"""
Deletes an attribute at the given position.
:param index: the 0-based index of the attribute to remove
:type index: int
"""
javabridge.call(self.jobject, "deleteAttributeAt", "(I)V", index)
def delete_first_attribute(self):
"""
Deletes the first attribute.
"""
self.delete_attribute(0)
def delete_last_attribute(self):
"""
Deletes the last attribute.
"""
self.delete_attribute(self.num_attributes - 1)
def delete_attribute_type(self, typ):
"""
Deletes all attributes of the given type in the dataset.
:param typ: the attribute type to remove, see weka.core.Attribute Javadoc
:type typ: int
"""
javabridge.call(self.jobject, "deleteAttributeType", "(I)V", typ)
def delete_with_missing(self, index):
"""
Deletes all rows that have a missing value at the specified attribute index.
:param index: the attribute index to check for missing attributes
:type index: int
"""
javabridge.call(self.jobject, "deleteWithMissing", "(I)V", index)
def insert_attribute(self, att, index):
"""
Inserts the attribute at the specified location.
:param att: the attribute to insert
:type att: Attribute
:param index: the index to insert the attribute at
:type index: int
"""
javabridge.call(self.jobject, "insertAttributeAt", "(Lweka/core/Attribute;I)V", att.jobject, index)
def compactify(self):
"""
Compactifies the set of instances.
"""
javabridge.call(self.jobject, "compactify", "()V")
def sort(self, index):
"""
Sorts the dataset using the specified attribute index.
:param index: the index of the attribute
:type index: int
"""
javabridge.call(self.jobject, "sort", "(I)V", index)
def randomize(self, random):
"""
Randomizes the dataset using the random number generator.
:param random: the random number generator to use
:type random: Random
"""
javabridge.call(self.jobject, "randomize", "(Ljava/util/Random;)V", random.jobject)
def stratify(self, folds):
"""
Stratifies the data after randomization for nominal class attributes.
:param folds: the number of folds to perform the stratification for
:type folds: int
"""
javabridge.call(self.jobject, "stratify", "(I)V", folds)
def train_cv(self, num_folds, fold, random=None):
"""
Generates a training fold for cross-validation.
:param num_folds: the number of folds of cross-validation, eg 10
:type num_folds: int
:param fold: the current fold (0-based)
:type fold: int
:param random: the random number generator
:type random: Random
:return: the training fold
:rtype: Instances
"""
if random is None:
return Instances(
javabridge.call(self.jobject, "trainCV", "(II)Lweka/core/Instances;",
num_folds, fold))
else:
return Instances(
javabridge.call(self.jobject, "trainCV", "(IILjava/util/Random;)Lweka/core/Instances;",
num_folds, fold, random.jobject))
def test_cv(self, num_folds, fold):
"""
Generates a test fold for cross-validation.
:param num_folds: the number of folds of cross-validation, eg 10
:type num_folds: int
:param fold: the current fold (0-based)
:type fold: int
:return: the training fold
:rtype: Instances
"""
return Instances(
javabridge.call(self.jobject, "testCV", "(II)Lweka/core/Instances;",
num_folds, fold))
def equal_headers(self, inst):
"""
Compares this dataset against the given one in terms of attributes.
:param inst: the dataset to compare against
:type inst: Instances
:return: None if the same, otherwise an error message
:rtype: str
"""
return javabridge.call(
self.jobject, "equalHeadersMsg", "(Lweka/core/Instances;)Ljava/lang/String;", inst.jobject)
@classmethod
def copy_instances(cls, dataset, from_row=None, num_rows=None):
"""
Creates a copy of the Instances. If either from_row or num_rows are None, then all of
the data is being copied.
:param dataset: the original dataset
:type dataset: Instances
:param from_row: the 0-based start index of the rows to copy
:type from_row: int
:param num_rows: the number of rows to copy
:type num_rows: int
:return: the copy of the data
:rtype: Instances
"""
if from_row is None or num_rows is None:
return Instances(
javabridge.make_instance(
"weka/core/Instances", "(Lweka/core/Instances;)V",
dataset.jobject))
else:
dataset = cls.copy_instances(dataset)
return Instances(
javabridge.make_instance(
"weka/core/Instances", "(Lweka/core/Instances;II)V",
dataset.jobject, from_row, num_rows))
@classmethod
def template_instances(cls, dataset, capacity=0):
"""
Uses the Instances as template to create an empty dataset.
:param dataset: the original dataset
:type dataset: Instances
:param capacity: how many data rows to reserve initially (see compactify)
:type capacity: int
:return: the empty dataset
:rtype: Instances
"""
return Instances(
javabridge.make_instance(
"weka/core/Instances", "(Lweka/core/Instances;I)V", dataset.jobject, capacity))
@classmethod
def create_instances(cls, name, atts, capacity):
"""
Creates a new Instances.
:param name: the relation name
:type name: str
:param atts: the list of attributes to use for the dataset
:type atts: list of Attribute
:param capacity: how many data rows to reserve initially (see compactify)
:type capacity: int
:return: the dataset
:rtype: Instances
"""
attributes = []
for att in atts:
attributes.append(att.jobject)
return Instances(
javabridge.make_instance(
"weka/core/Instances", "(Ljava/lang/String;Ljava/util/ArrayList;I)V",
name, javabridge.make_list(attributes), capacity))
@classmethod
def merge_instances(cls, inst1, inst2):
"""
Merges the two datasets (side-by-side).
:param inst1: the first dataset
:type inst1: Instances or str
:param inst2: the first dataset
:type inst2: Instances
:return: the combined dataset
:rtype: Instances
"""
return Instances(javabridge.static_call(
"weka/core/Instances", "mergeInstances",
"(Lweka/core/Instances;Lweka/core/Instances;)Lweka/core/Instances;", inst1.jobject, inst2.jobject))
@classmethod
def train_test_split(self, percentage, rnd=None):
"""
Generates a train/test split.
:param percentage: the percentage split to use (amount to use for training; 0-100)
:type percentage: double
:param rnd: the random number generator to use, if None the order gets preserved
:type rnd: Random
:return: the train/test splits
:rtype: tuple
"""
if percentage <= 0:
raise Exception("Split percentage must be > 0, provided: " + str(percentage))
if percentage >= 100:
raise Exception("Split percentage must be < 100, provided: " + str(percentage))
if rnd is not None:
self.randomize(rnd)
train_size = int(round(self.num_instances * percentage / 100))
test_size = self.num_instances - train_size
train_inst = Instances.copy_instances(self, 0, train_size)
test_inst = Instances.copy_instances(self, train_size, test_size)
return train_inst, test_inst
@classmethod
def summary(cls, inst):
"""
Generates a summary of the dataset.
:param inst: the dataset
:type inst: Instances
:return: the summary
:rtype: str
"""
return javabridge.call(inst.jobject, "toSummaryString", "()Ljava/lang/String;")
|
fracpete/python-weka-wrapper | python/weka/core/dataset.py | Attribute.values | python | def values(self):
enm = javabridge.call(self.jobject, "enumerateValues", "()Ljava/util/Enumeration;")
if enm is None:
return None
else:
return types.enumeration_to_list(enm) | Returns the labels, strings or relation-values.
:return: all the values, None if not NOMINAL, STRING, or RELATION
:rtype: list | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/dataset.py#L913-L924 | null | class Attribute(JavaObject):
"""
Wrapper class for weka.core.Attribute.
"""
def __init__(self, jobject):
"""
Initializes the weka.core.Attribute wrapper.
:param jobject: the JB_Object
:type jobject: JB_Object
"""
self.enforce_type(jobject, "weka.core.Attribute")
super(Attribute, self).__init__(jobject)
@property
def name(self):
"""
Returns the name of the attribute.
:return: the name
:rtype: str
"""
return javabridge.call(self.jobject, "name", "()Ljava/lang/String;")
@property
def index(self):
"""
Returns the index of this attribute.
:return: the index
:rtype: int
"""
return javabridge.call(self.jobject, "index", "()I")
@property
def weight(self):
"""
Returns the weight of the attribute.
:return: the weight
:rtype: float
"""
return javabridge.call(self.jobject, "weight", "()D")
@weight.setter
def weight(self, weight):
"""
Sets the weight of the attribute.
:param weight: the weight of the attribute
:type weight: float
"""
javabridge.call(self.jobject, "setWeight", "(D)V", weight)
def index_of(self, label):
"""
Returns the index of the label in this attribute.
:param label: the string label to get the index for
:type label: str
:return: the 0-based index
:rtype: int
"""
return javabridge.call(self.jobject, "indexOfValue", "(Ljava/lang/String;)I", label)
def value(self, index):
"""
Returns the label for the index.
:param index: the 0-based index of the label to return
:type index: int
:return: the label
:rtype: str
"""
return javabridge.call(self.jobject, "value", "(I)Ljava/lang/String;", index)
@property
def num_values(self):
"""
Returns the number of labels.
:return: the number of labels
:rtype: int
"""
return javabridge.call(self.jobject, "numValues", "()I")
@property
@property
def ordering(self):
"""
Returns the ordering of the attribute.
:return: the ordering (ORDERING_SYMBOLIC, ORDERING_ORDERED, ORDERING_MODULO)
:rtype: int
"""
return javabridge.call(self.jobject, "ordering", "()I")
@property
def type(self):
"""
Returns the type of the attribute. See weka.core.Attribute Javadoc.
:return: the type
:rtype: int
"""
return javabridge.call(self.jobject, "type", "()I")
def type_str(self, short=False):
"""
Returns the type of the attribute as string.
:return: the type
:rtype: str
"""
if short:
return javabridge.static_call(
"weka/core/Attribute", "typeToStringShort", "(Lweka/core/Attribute;)Ljava/lang/String;",
self.jobject)
else:
return javabridge.static_call(
"weka/core/Attribute", "typeToString", "(Lweka/core/Attribute;)Ljava/lang/String;",
self.jobject)
@property
def is_averagable(self):
"""
Returns whether the attribute is averagable.
:return: whether averagable
:rtype: bool
"""
return javabridge.call(self.jobject, "isAveragable", "()Z")
@property
def is_date(self):
"""
Returns whether the attribute is a date one.
:return: whether date attribute
:rtype: bool
"""
return javabridge.call(self.jobject, "isDate", "()Z")
@property
def is_nominal(self):
"""
Returns whether the attribute is a nominal one.
:return: whether nominal attribute
:rtype: bool
"""
return javabridge.call(self.jobject, "isNominal", "()Z")
@property
def is_numeric(self):
"""
Returns whether the attribute is a numeric one (date or numeric).
:return: whether numeric attribute
:rtype: bool
"""
return javabridge.call(self.jobject, "isNumeric", "()Z")
@property
def is_relation_valued(self):
"""
Returns whether the attribute is a relation valued one.
:return: whether relation valued attribute
:rtype: bool
"""
return javabridge.call(self.jobject, "isRelationValued", "()Z")
@property
def is_string(self):
"""
Returns whether the attribute is a string attribute.
:return: whether string attribute
:rtype: bool
"""
return javabridge.call(self.jobject, "isString", "()Z")
@property
def date_format(self):
"""
Returns the format of this data attribute. See java.text.SimpleDateFormat Javadoc.
:return: the format string
:rtype: str
"""
return javabridge.call(self.jobject, "getDateFormat", "()Ljava/lang/String;")
@property
def lower_numeric_bound(self):
"""
Returns the lower numeric bound of the numeric attribute.
:return: the lower bound
:rtype: float
"""
return javabridge.call(self.jobject, "getLowerNumericBound", "()D")
@property
def upper_numeric_bound(self):
"""
Returns the upper numeric bound of the numeric attribute.
:return: the upper bound
:rtype: float
"""
return javabridge.call(self.jobject, "getUpperNumericBound", "()D")
def is_in_range(self, value):
"""
Checks whether the value is within the bounds of the numeric attribute.
:param value: the numeric value to check
:type value: float
:return: whether between lower and upper bound
:rtype: bool
"""
return javabridge.call(self.jobject, "isInRange", "(D)Z", value)
def add_string_value(self, s):
"""
Adds the string value, returns the index.
:param s: the string to add
:type s: str
:return: the index
:rtype: int
"""
return javabridge.call(self.jobject, "addStringValue", "(Ljava/lang/String;)I", s)
def add_relation(self, instances):
"""
Adds the relation value, returns the index.
:param instances: the Instances object to add
:type instances: Instances
:return: the index
:rtype: int
"""
return javabridge.call(self.jobject, "addRelation", "(Lweka/core/Instances;)I", instances.jobject)
def parse_date(self, s):
"""
Parses the date string and returns the internal format value.
:param s: the date string
:type s: str
:return: the internal format
:rtype: float
"""
return javabridge.call(self.jobject, "parseDate", "(Ljava/lang/String;)D", s)
def equals(self, att):
"""
Checks whether this attributes is the same as the provided one.
:param att: the Attribute to check against
:type att: Attribute
:return: whether the same
:rtype: bool
"""
return javabridge.call(self.jobject, "equals", "(Lweka/core/Attribute;)Z", att.jobject)
def equals_msg(self, att):
"""
Checks whether this attributes is the same as the provided one.
Returns None if the same, otherwise error message.
:param att: the Attribute to check against
:type att: Attribute
:return: None if the same, otherwise error message
:rtype: str
"""
return javabridge.call(self.jobject, "equalsMsg", "(Lweka/core/Attribute;)Ljava/lang/String;", att.jobject)
def copy(self, name=None):
"""
Creates a copy of this attribute.
:param name: the new name, uses the old one if None
:type name: str
:return: the copy of the attribute
:rtype: Attribute
"""
if name is None:
return Attribute(
javabridge.call(self.jobject, "copy", "()Ljava/lang/Object;"))
else:
return Attribute(
javabridge.call(self.jobject, "copy", "(Ljava/lang/String;)Lweka/core/Attribute;", name))
@classmethod
def create_numeric(cls, name):
"""
Creates a numeric attribute.
:param name: the name of the attribute
:type name: str
"""
return Attribute(
javabridge.make_instance(
"weka/core/Attribute", "(Ljava/lang/String;)V", name))
@classmethod
def create_date(cls, name, formt="yyyy-MM-dd'T'HH:mm:ss"):
"""
Creates a date attribute.
:param name: the name of the attribute
:type name: str
:param formt: the date format, see Javadoc for java.text.SimpleDateFormat
:type formt: str
"""
return Attribute(
javabridge.make_instance(
"weka/core/Attribute", "(Ljava/lang/String;Ljava/lang/String;)V", name, formt))
@classmethod
def create_nominal(cls, name, labels):
"""
Creates a nominal attribute.
:param name: the name of the attribute
:type name: str
:param labels: the list of string labels to use
:type labels: list
"""
return Attribute(
javabridge.make_instance(
"weka/core/Attribute", "(Ljava/lang/String;Ljava/util/List;)V", name, javabridge.make_list(labels)))
@classmethod
def create_string(cls, name):
"""
Creates a string attribute.
:param name: the name of the attribute
:type name: str
"""
return Attribute(
javabridge.make_instance(
"weka/core/Attribute", "(Ljava/lang/String;Ljava/util/List;)V", name, None))
@classmethod
def create_relational(cls, name, inst):
"""
Creates a relational attribute.
:param name: the name of the attribute
:type name: str
:param inst: the structure of the relational attribute
:type inst: Instances
"""
return Attribute(
javabridge.make_instance(
"weka/core/Attribute", "(Ljava/lang/String;Lweka/core/Instances;)V", name, inst.jobject))
|
fracpete/python-weka-wrapper | python/weka/core/dataset.py | InstanceIterator.next | python | def next(self):
if self.row < self.data.num_instances:
index = self.row
self.row += 1
return self.data.get_instance(index)
else:
raise StopIteration() | Returns the next row from the Instances object.
:return: the next Instance object
:rtype: Instance | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/dataset.py#L1405-L1417 | null | class InstanceIterator(object):
"""
Iterator for rows in an Instances object.
"""
def __init__(self, data):
"""
Initializes the iterator.
:param data: the Instances object to iterate over
:type data: Instances
"""
self.data = data
self.row = 0
def __iter__(self):
"""
Returns itself.
"""
return self
|
fracpete/python-weka-wrapper | python/weka/core/dataset.py | AttributeIterator.next | python | def next(self):
if self.col < self.data.num_attributes:
index = self.col
self.col += 1
return self.data.attribute(index)
else:
raise StopIteration() | Returns the next attribute from the Instances object.
:return: the next Attribute object
:rtype: Attribute | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/dataset.py#L1440-L1452 | null | class AttributeIterator(object):
"""
Iterator for attributes in an Instances object.
"""
def __init__(self, data):
"""
Initializes the iterator.
:param data: the Instances object to iterate over
:type data: Instances
"""
self.data = data
self.col = 0
def __iter__(self):
"""
Returns itself.
"""
return self
|
jtambasco/modesolverpy | modesolverpy/mode_solver.py | _ModeSolver.solve_sweep_structure | python | def solve_sweep_structure(
self,
structures,
sweep_param_list,
filename="structure_n_effs.dat",
plot=True,
x_label="Structure number",
fraction_mode_list=[],
):
n_effs = []
mode_types = []
fractions_te = []
fractions_tm = []
for s in tqdm.tqdm(structures, ncols=70):
self.solve(s)
n_effs.append(np.real(self.n_effs))
mode_types.append(self._get_mode_types())
fractions_te.append(self.fraction_te)
fractions_tm.append(self.fraction_tm)
if filename:
self._write_n_effs_to_file(
n_effs, self._modes_directory + filename, sweep_param_list
)
with open(self._modes_directory + "mode_types.dat", "w") as fs:
header = ",".join(
"Mode%i" % i for i, _ in enumerate(mode_types[0])
)
fs.write("# " + header + "\n")
for mt in mode_types:
txt = ",".join("%s %.2f" % pair for pair in mt)
fs.write(txt + "\n")
with open(self._modes_directory + "fraction_te.dat", "w") as fs:
header = "fraction te"
fs.write("# param sweep," + header + "\n")
for param, fte in zip(sweep_param_list, fractions_te):
txt = "%.6f," % param
txt += ",".join("%.2f" % f for f in fte)
fs.write(txt + "\n")
with open(self._modes_directory + "fraction_tm.dat", "w") as fs:
header = "fraction tm"
fs.write("# param sweep," + header + "\n")
for param, ftm in zip(sweep_param_list, fractions_tm):
txt = "%.6f," % param
txt += ",".join("%.2f" % f for f in ftm)
fs.write(txt + "\n")
if plot:
if MPL:
title = "$n_{eff}$ vs %s" % x_label
y_label = "$n_{eff}$"
else:
title = "n_{effs} vs %s" % x_label
y_label = "n_{eff}"
self._plot_n_effs(
self._modes_directory + filename, self._modes_directory + "fraction_te.dat", x_label, y_label, title
)
title = "TE Fraction vs %s" % x_label
self._plot_fraction(
self._modes_directory + "fraction_te.dat",
x_label,
"TE Fraction [%]",
title,
fraction_mode_list,
)
title = "TM Fraction vs %s" % x_label
self._plot_fraction(
self._modes_directory + "fraction_tm.dat",
x_label,
"TM Fraction [%]",
title,
fraction_mode_list,
)
return n_effs | Find the modes of many structures.
Args:
structures (list): A list of `Structures` to find the modes
of.
sweep_param_list (list): A list of the parameter-sweep sweep
that was used. This is for plotting purposes only.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'structure_n_effs.dat'.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
x_label (str): x-axis text to display in the plot.
fraction_mode_list (list): A list of mode indices of the modes
that should be included in the TE/TM mode fraction plot.
If the list is empty, all modes will be included. The list
is empty by default.
Returns:
list: A list of the effective indices found for each structure. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/mode_solver.py#L92-L192 | null | class _ModeSolver(with_metaclass(abc.ABCMeta)):
def __init__(
self,
n_eigs,
tol=0.0,
boundary="0000",
mode_profiles=True,
initial_mode_guess=None,
n_eff_guess=None,
):
self._n_eigs = int(n_eigs)
self._tol = tol
self._boundary = boundary
self._mode_profiles = mode_profiles
self._initial_mode_guess = initial_mode_guess
self._n_eff_guess = n_eff_guess
self.n_effs = None
self.modes = None
self.mode_types = None
self.overlaps = None
self._path = os.path.dirname(sys.modules[__name__].__file__) + "/"
@abc.abstractproperty
def _modes_directory(self):
pass
@abc.abstractmethod
def _solve(self, structure, wavelength):
pass
def solve(self, structure):
"""
Find the modes of a given structure.
Args:
structure (Structure): The target structure to solve
for modes.
Returns:
dict: The 'n_effs' key gives the effective indices
of the modes. The 'modes' key exists of mode
profiles were solved for; in this case, it will
return arrays of the mode profiles.
"""
return self._solve(structure, structure._wl)
def solve_sweep_wavelength(
self,
structure,
wavelengths,
filename="wavelength_n_effs.dat",
plot=True,
):
"""
Solve for the effective indices of a fixed structure at
different wavelengths.
Args:
structure (Slabs): The target structure to solve
for modes.
wavelengths (list): A list of wavelengths to sweep
over.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'wavelength_n_effs.dat'.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
Returns:
list: A list of the effective indices found for each wavelength.
"""
n_effs = []
for w in tqdm.tqdm(wavelengths, ncols=70):
structure.change_wavelength(w)
self.solve(structure)
n_effs.append(np.real(self.n_effs))
if filename:
self._write_n_effs_to_file(
n_effs, self._modes_directory + filename, wavelengths
)
if plot:
if MPL:
title = "$n_{eff}$ vs Wavelength"
y_label = "$n_{eff}$"
else:
title = "n_{effs} vs Wavelength" % x_label
y_label = "n_{eff}"
self._plot_n_effs(
self._modes_directory + filename,
self._modes_directory + "fraction_te.dat",
"Wavelength",
"n_{eff}",
title,
)
return n_effs
def solve_ng(self, structure, wavelength_step=0.01, filename="ng.dat"):
r"""
Solve for the group index, :math:`n_g`, of a structure at a particular
wavelength.
Args:
structure (Structure): The target structure to solve
for modes.
wavelength_step (float): The step to take below and
above the nominal wavelength. This is used for
approximating the gradient of :math:`n_\mathrm{eff}`
at the nominal wavelength. Default is 0.01.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'ng.dat'.
Returns:
list: A list of the group indices found for each mode.
"""
wl_nom = structure._wl
self.solve(structure)
n_ctrs = self.n_effs
structure.change_wavelength(wl_nom - wavelength_step)
self.solve(structure)
n_bcks = self.n_effs
structure.change_wavelength(wl_nom + wavelength_step)
self.solve(structure)
n_frws = self.n_effs
n_gs = []
for n_ctr, n_bck, n_frw in zip(n_ctrs, n_bcks, n_frws):
n_gs.append(
n_ctr - wl_nom * (n_frw - n_bck) / (2 * wavelength_step)
)
if filename:
with open(self._modes_directory + filename, "w") as fs:
fs.write("# Mode idx, Group index\n")
for idx, n_g in enumerate(n_gs):
fs.write("%i,%.3f\n" % (idx, np.round(n_g.real, 3)))
return n_gs
def _get_mode_filename(self, field_name, mode_number, filename):
filename_prefix, filename_ext = os.path.splitext(filename)
filename_mode = (
filename_prefix
+ "_"
+ field_name
+ "_"
+ str(mode_number)
+ filename_ext
)
return filename_mode
def _write_n_effs_to_file(self, n_effs, filename, x_vals=None):
with open(filename, "w") as fs:
fs.write('# Sweep param, mode 1, mode 2, ...\n')
for i, n_eff in enumerate(n_effs):
if x_vals is not None:
line_start = str(x_vals[i]) + ","
else:
line_start = ""
line = ",".join([str(np.round(n, 3)) for n in n_eff])
fs.write(line_start + line + "\n")
return n_effs
def _write_mode_to_file(self, mode, filename):
with open(filename, "w") as fs:
for e in mode[::-1]:
e_str = ",".join([str(v) for v in e])
fs.write(e_str + "\n")
return mode
def _plot_n_effs(self, filename_n_effs, filename_te_fractions, xlabel, ylabel, title):
args = {
"titl": title,
"xlab": xlabel,
"ylab": ylabel,
"filename_data": filename_n_effs,
"filename_frac_te": filename_te_fractions,
"filename_image": None,
"num_modes": len(self.modes),
}
filename_image_prefix, _ = os.path.splitext(filename_n_effs)
filename_image = filename_image_prefix + ".png"
args["filename_image"] = filename_image
if MPL:
data = np.loadtxt(args["filename_data"], delimiter=",").T
plt.clf()
plt.title(title)
plt.xlabel(args["xlab"])
plt.ylabel(args["ylab"])
for i in range(args["num_modes"]):
plt.plot(data[0], data[i + 1], "-o")
plt.savefig(args["filename_image"])
else:
gp.gnuplot(self._path + "n_effs.gpi", args, silent=False)
gp.trim_pad_image(filename_image)
return args
def _plot_fraction(
self, filename_fraction, xlabel, ylabel, title, mode_list=[]
):
if not mode_list:
mode_list = range(len(self.modes))
gp_mode_list = " ".join(str(idx) for idx in mode_list)
args = {
"titl": title,
"xlab": xlabel,
"ylab": ylabel,
"filename_data": filename_fraction,
"filename_image": None,
"mode_list": gp_mode_list,
}
filename_image_prefix, _ = os.path.splitext(filename_fraction)
filename_image = filename_image_prefix + ".png"
args["filename_image"] = filename_image
if MPL:
data = np.loadtxt(args["filename_data"], delimiter=",").T
plt.clf()
plt.title(title)
plt.xlabel(args["xlab"])
plt.ylabel(args["ylab"])
for i, _ in enumerate(self.modes):
plt.plot(data[0], data[i + 1], "-o")
plt.savefig(args["filename_image"])
else:
gp.gnuplot(self._path + "fractions.gpi", args, silent=False)
gp.trim_pad_image(filename_image)
return args
def _plot_mode(
self,
field_name,
mode_number,
filename_mode,
n_eff=None,
subtitle="",
e2_x=0.0,
e2_y=0.0,
ctr_x=0.0,
ctr_y=0.0,
area=None,
wavelength=None,
):
fn = field_name[0] + "_{" + field_name[1:] + "}"
if MPL:
title = r"Mode %i $|%s|$ Profile" % (mode_number, fn)
else:
title = r"Mode %i |%s| Profile" % (mode_number, fn)
if n_eff:
if MPL:
title += r", $n_{eff}$: " + "{:.3f}".format(n_eff.real)
else:
title += ", n_{eff}: " + "{:.3f}".format(n_eff.real)
if wavelength:
if MPL:
title += r", $\lambda = %s " % "{:.3f} \mu$m".format(wavelength)
else:
title += r", $\lambda = %s " % "{:.3f} \mu$m".format(wavelength)
if area:
if MPL:
title += ", $A_%s$: " % field_name[1] + "{:.1f}%".format(area)
else:
title += ", A_%s: " % field_name[1] + "{:.1f}\%".format(area)
if subtitle:
if MPL:
title2 = "\n$%s$" % subtitle
else:
title += "\n{/*0.7 %s}" % subtitle
args = {
"title": title,
"x_pts": self._structure.xc_pts,
"y_pts": self._structure.yc_pts,
"x_min": self._structure.xc_min,
"x_max": self._structure.xc_max,
"y_min": self._structure.yc_min,
"y_max": self._structure.yc_max,
"x_step": self._structure.x_step,
"y_step": self._structure.y_step,
"filename_data": filename_mode,
"filename_image": None,
"e2_x": e2_x,
"e2_y": e2_y,
"ctr_x": ctr_x,
"ctr_y": ctr_y,
}
filename_image_prefix, _ = os.path.splitext(filename_mode)
filename_image = filename_image_prefix + ".png"
args["filename_image"] = filename_image
if MPL:
heatmap = np.loadtxt(filename_mode, delimiter=",")
plt.clf()
plt.suptitle(title)
if subtitle:
plt.rcParams.update({"axes.titlesize": "small"})
plt.title(title2)
plt.xlabel("x")
plt.ylabel("y")
plt.imshow(
np.flipud(heatmap),
extent=(
args["x_min"],
args["x_max"],
args["y_min"],
args["y_max"],
),
aspect="auto",
)
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(self._path + "mode.gpi", args)
gp.trim_pad_image(filename_image)
return args
|
jtambasco/modesolverpy | modesolverpy/mode_solver.py | _ModeSolver.solve_sweep_wavelength | python | def solve_sweep_wavelength(
self,
structure,
wavelengths,
filename="wavelength_n_effs.dat",
plot=True,
):
n_effs = []
for w in tqdm.tqdm(wavelengths, ncols=70):
structure.change_wavelength(w)
self.solve(structure)
n_effs.append(np.real(self.n_effs))
if filename:
self._write_n_effs_to_file(
n_effs, self._modes_directory + filename, wavelengths
)
if plot:
if MPL:
title = "$n_{eff}$ vs Wavelength"
y_label = "$n_{eff}$"
else:
title = "n_{effs} vs Wavelength" % x_label
y_label = "n_{eff}"
self._plot_n_effs(
self._modes_directory + filename,
self._modes_directory + "fraction_te.dat",
"Wavelength",
"n_{eff}",
title,
)
return n_effs | Solve for the effective indices of a fixed structure at
different wavelengths.
Args:
structure (Slabs): The target structure to solve
for modes.
wavelengths (list): A list of wavelengths to sweep
over.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'wavelength_n_effs.dat'.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
Returns:
list: A list of the effective indices found for each wavelength. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/mode_solver.py#L194-L243 | null | class _ModeSolver(with_metaclass(abc.ABCMeta)):
def __init__(
self,
n_eigs,
tol=0.0,
boundary="0000",
mode_profiles=True,
initial_mode_guess=None,
n_eff_guess=None,
):
self._n_eigs = int(n_eigs)
self._tol = tol
self._boundary = boundary
self._mode_profiles = mode_profiles
self._initial_mode_guess = initial_mode_guess
self._n_eff_guess = n_eff_guess
self.n_effs = None
self.modes = None
self.mode_types = None
self.overlaps = None
self._path = os.path.dirname(sys.modules[__name__].__file__) + "/"
@abc.abstractproperty
def _modes_directory(self):
pass
@abc.abstractmethod
def _solve(self, structure, wavelength):
pass
def solve(self, structure):
"""
Find the modes of a given structure.
Args:
structure (Structure): The target structure to solve
for modes.
Returns:
dict: The 'n_effs' key gives the effective indices
of the modes. The 'modes' key exists of mode
profiles were solved for; in this case, it will
return arrays of the mode profiles.
"""
return self._solve(structure, structure._wl)
def solve_sweep_structure(
self,
structures,
sweep_param_list,
filename="structure_n_effs.dat",
plot=True,
x_label="Structure number",
fraction_mode_list=[],
):
"""
Find the modes of many structures.
Args:
structures (list): A list of `Structures` to find the modes
of.
sweep_param_list (list): A list of the parameter-sweep sweep
that was used. This is for plotting purposes only.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'structure_n_effs.dat'.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
x_label (str): x-axis text to display in the plot.
fraction_mode_list (list): A list of mode indices of the modes
that should be included in the TE/TM mode fraction plot.
If the list is empty, all modes will be included. The list
is empty by default.
Returns:
list: A list of the effective indices found for each structure.
"""
n_effs = []
mode_types = []
fractions_te = []
fractions_tm = []
for s in tqdm.tqdm(structures, ncols=70):
self.solve(s)
n_effs.append(np.real(self.n_effs))
mode_types.append(self._get_mode_types())
fractions_te.append(self.fraction_te)
fractions_tm.append(self.fraction_tm)
if filename:
self._write_n_effs_to_file(
n_effs, self._modes_directory + filename, sweep_param_list
)
with open(self._modes_directory + "mode_types.dat", "w") as fs:
header = ",".join(
"Mode%i" % i for i, _ in enumerate(mode_types[0])
)
fs.write("# " + header + "\n")
for mt in mode_types:
txt = ",".join("%s %.2f" % pair for pair in mt)
fs.write(txt + "\n")
with open(self._modes_directory + "fraction_te.dat", "w") as fs:
header = "fraction te"
fs.write("# param sweep," + header + "\n")
for param, fte in zip(sweep_param_list, fractions_te):
txt = "%.6f," % param
txt += ",".join("%.2f" % f for f in fte)
fs.write(txt + "\n")
with open(self._modes_directory + "fraction_tm.dat", "w") as fs:
header = "fraction tm"
fs.write("# param sweep," + header + "\n")
for param, ftm in zip(sweep_param_list, fractions_tm):
txt = "%.6f," % param
txt += ",".join("%.2f" % f for f in ftm)
fs.write(txt + "\n")
if plot:
if MPL:
title = "$n_{eff}$ vs %s" % x_label
y_label = "$n_{eff}$"
else:
title = "n_{effs} vs %s" % x_label
y_label = "n_{eff}"
self._plot_n_effs(
self._modes_directory + filename, self._modes_directory + "fraction_te.dat", x_label, y_label, title
)
title = "TE Fraction vs %s" % x_label
self._plot_fraction(
self._modes_directory + "fraction_te.dat",
x_label,
"TE Fraction [%]",
title,
fraction_mode_list,
)
title = "TM Fraction vs %s" % x_label
self._plot_fraction(
self._modes_directory + "fraction_tm.dat",
x_label,
"TM Fraction [%]",
title,
fraction_mode_list,
)
return n_effs
def solve_ng(self, structure, wavelength_step=0.01, filename="ng.dat"):
r"""
Solve for the group index, :math:`n_g`, of a structure at a particular
wavelength.
Args:
structure (Structure): The target structure to solve
for modes.
wavelength_step (float): The step to take below and
above the nominal wavelength. This is used for
approximating the gradient of :math:`n_\mathrm{eff}`
at the nominal wavelength. Default is 0.01.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'ng.dat'.
Returns:
list: A list of the group indices found for each mode.
"""
wl_nom = structure._wl
self.solve(structure)
n_ctrs = self.n_effs
structure.change_wavelength(wl_nom - wavelength_step)
self.solve(structure)
n_bcks = self.n_effs
structure.change_wavelength(wl_nom + wavelength_step)
self.solve(structure)
n_frws = self.n_effs
n_gs = []
for n_ctr, n_bck, n_frw in zip(n_ctrs, n_bcks, n_frws):
n_gs.append(
n_ctr - wl_nom * (n_frw - n_bck) / (2 * wavelength_step)
)
if filename:
with open(self._modes_directory + filename, "w") as fs:
fs.write("# Mode idx, Group index\n")
for idx, n_g in enumerate(n_gs):
fs.write("%i,%.3f\n" % (idx, np.round(n_g.real, 3)))
return n_gs
def _get_mode_filename(self, field_name, mode_number, filename):
filename_prefix, filename_ext = os.path.splitext(filename)
filename_mode = (
filename_prefix
+ "_"
+ field_name
+ "_"
+ str(mode_number)
+ filename_ext
)
return filename_mode
def _write_n_effs_to_file(self, n_effs, filename, x_vals=None):
with open(filename, "w") as fs:
fs.write('# Sweep param, mode 1, mode 2, ...\n')
for i, n_eff in enumerate(n_effs):
if x_vals is not None:
line_start = str(x_vals[i]) + ","
else:
line_start = ""
line = ",".join([str(np.round(n, 3)) for n in n_eff])
fs.write(line_start + line + "\n")
return n_effs
def _write_mode_to_file(self, mode, filename):
with open(filename, "w") as fs:
for e in mode[::-1]:
e_str = ",".join([str(v) for v in e])
fs.write(e_str + "\n")
return mode
def _plot_n_effs(self, filename_n_effs, filename_te_fractions, xlabel, ylabel, title):
args = {
"titl": title,
"xlab": xlabel,
"ylab": ylabel,
"filename_data": filename_n_effs,
"filename_frac_te": filename_te_fractions,
"filename_image": None,
"num_modes": len(self.modes),
}
filename_image_prefix, _ = os.path.splitext(filename_n_effs)
filename_image = filename_image_prefix + ".png"
args["filename_image"] = filename_image
if MPL:
data = np.loadtxt(args["filename_data"], delimiter=",").T
plt.clf()
plt.title(title)
plt.xlabel(args["xlab"])
plt.ylabel(args["ylab"])
for i in range(args["num_modes"]):
plt.plot(data[0], data[i + 1], "-o")
plt.savefig(args["filename_image"])
else:
gp.gnuplot(self._path + "n_effs.gpi", args, silent=False)
gp.trim_pad_image(filename_image)
return args
def _plot_fraction(
self, filename_fraction, xlabel, ylabel, title, mode_list=[]
):
if not mode_list:
mode_list = range(len(self.modes))
gp_mode_list = " ".join(str(idx) for idx in mode_list)
args = {
"titl": title,
"xlab": xlabel,
"ylab": ylabel,
"filename_data": filename_fraction,
"filename_image": None,
"mode_list": gp_mode_list,
}
filename_image_prefix, _ = os.path.splitext(filename_fraction)
filename_image = filename_image_prefix + ".png"
args["filename_image"] = filename_image
if MPL:
data = np.loadtxt(args["filename_data"], delimiter=",").T
plt.clf()
plt.title(title)
plt.xlabel(args["xlab"])
plt.ylabel(args["ylab"])
for i, _ in enumerate(self.modes):
plt.plot(data[0], data[i + 1], "-o")
plt.savefig(args["filename_image"])
else:
gp.gnuplot(self._path + "fractions.gpi", args, silent=False)
gp.trim_pad_image(filename_image)
return args
def _plot_mode(
self,
field_name,
mode_number,
filename_mode,
n_eff=None,
subtitle="",
e2_x=0.0,
e2_y=0.0,
ctr_x=0.0,
ctr_y=0.0,
area=None,
wavelength=None,
):
fn = field_name[0] + "_{" + field_name[1:] + "}"
if MPL:
title = r"Mode %i $|%s|$ Profile" % (mode_number, fn)
else:
title = r"Mode %i |%s| Profile" % (mode_number, fn)
if n_eff:
if MPL:
title += r", $n_{eff}$: " + "{:.3f}".format(n_eff.real)
else:
title += ", n_{eff}: " + "{:.3f}".format(n_eff.real)
if wavelength:
if MPL:
title += r", $\lambda = %s " % "{:.3f} \mu$m".format(wavelength)
else:
title += r", $\lambda = %s " % "{:.3f} \mu$m".format(wavelength)
if area:
if MPL:
title += ", $A_%s$: " % field_name[1] + "{:.1f}%".format(area)
else:
title += ", A_%s: " % field_name[1] + "{:.1f}\%".format(area)
if subtitle:
if MPL:
title2 = "\n$%s$" % subtitle
else:
title += "\n{/*0.7 %s}" % subtitle
args = {
"title": title,
"x_pts": self._structure.xc_pts,
"y_pts": self._structure.yc_pts,
"x_min": self._structure.xc_min,
"x_max": self._structure.xc_max,
"y_min": self._structure.yc_min,
"y_max": self._structure.yc_max,
"x_step": self._structure.x_step,
"y_step": self._structure.y_step,
"filename_data": filename_mode,
"filename_image": None,
"e2_x": e2_x,
"e2_y": e2_y,
"ctr_x": ctr_x,
"ctr_y": ctr_y,
}
filename_image_prefix, _ = os.path.splitext(filename_mode)
filename_image = filename_image_prefix + ".png"
args["filename_image"] = filename_image
if MPL:
heatmap = np.loadtxt(filename_mode, delimiter=",")
plt.clf()
plt.suptitle(title)
if subtitle:
plt.rcParams.update({"axes.titlesize": "small"})
plt.title(title2)
plt.xlabel("x")
plt.ylabel("y")
plt.imshow(
np.flipud(heatmap),
extent=(
args["x_min"],
args["x_max"],
args["y_min"],
args["y_max"],
),
aspect="auto",
)
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(self._path + "mode.gpi", args)
gp.trim_pad_image(filename_image)
return args
|
jtambasco/modesolverpy | modesolverpy/mode_solver.py | _ModeSolver.solve_ng | python | def solve_ng(self, structure, wavelength_step=0.01, filename="ng.dat"):
r"""
Solve for the group index, :math:`n_g`, of a structure at a particular
wavelength.
Args:
structure (Structure): The target structure to solve
for modes.
wavelength_step (float): The step to take below and
above the nominal wavelength. This is used for
approximating the gradient of :math:`n_\mathrm{eff}`
at the nominal wavelength. Default is 0.01.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'ng.dat'.
Returns:
list: A list of the group indices found for each mode.
"""
wl_nom = structure._wl
self.solve(structure)
n_ctrs = self.n_effs
structure.change_wavelength(wl_nom - wavelength_step)
self.solve(structure)
n_bcks = self.n_effs
structure.change_wavelength(wl_nom + wavelength_step)
self.solve(structure)
n_frws = self.n_effs
n_gs = []
for n_ctr, n_bck, n_frw in zip(n_ctrs, n_bcks, n_frws):
n_gs.append(
n_ctr - wl_nom * (n_frw - n_bck) / (2 * wavelength_step)
)
if filename:
with open(self._modes_directory + filename, "w") as fs:
fs.write("# Mode idx, Group index\n")
for idx, n_g in enumerate(n_gs):
fs.write("%i,%.3f\n" % (idx, np.round(n_g.real, 3)))
return n_gs | r"""
Solve for the group index, :math:`n_g`, of a structure at a particular
wavelength.
Args:
structure (Structure): The target structure to solve
for modes.
wavelength_step (float): The step to take below and
above the nominal wavelength. This is used for
approximating the gradient of :math:`n_\mathrm{eff}`
at the nominal wavelength. Default is 0.01.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'ng.dat'.
Returns:
list: A list of the group indices found for each mode. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/mode_solver.py#L245-L288 | null | class _ModeSolver(with_metaclass(abc.ABCMeta)):
def __init__(
self,
n_eigs,
tol=0.0,
boundary="0000",
mode_profiles=True,
initial_mode_guess=None,
n_eff_guess=None,
):
self._n_eigs = int(n_eigs)
self._tol = tol
self._boundary = boundary
self._mode_profiles = mode_profiles
self._initial_mode_guess = initial_mode_guess
self._n_eff_guess = n_eff_guess
self.n_effs = None
self.modes = None
self.mode_types = None
self.overlaps = None
self._path = os.path.dirname(sys.modules[__name__].__file__) + "/"
@abc.abstractproperty
def _modes_directory(self):
pass
@abc.abstractmethod
def _solve(self, structure, wavelength):
pass
def solve(self, structure):
"""
Find the modes of a given structure.
Args:
structure (Structure): The target structure to solve
for modes.
Returns:
dict: The 'n_effs' key gives the effective indices
of the modes. The 'modes' key exists of mode
profiles were solved for; in this case, it will
return arrays of the mode profiles.
"""
return self._solve(structure, structure._wl)
def solve_sweep_structure(
self,
structures,
sweep_param_list,
filename="structure_n_effs.dat",
plot=True,
x_label="Structure number",
fraction_mode_list=[],
):
"""
Find the modes of many structures.
Args:
structures (list): A list of `Structures` to find the modes
of.
sweep_param_list (list): A list of the parameter-sweep sweep
that was used. This is for plotting purposes only.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'structure_n_effs.dat'.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
x_label (str): x-axis text to display in the plot.
fraction_mode_list (list): A list of mode indices of the modes
that should be included in the TE/TM mode fraction plot.
If the list is empty, all modes will be included. The list
is empty by default.
Returns:
list: A list of the effective indices found for each structure.
"""
n_effs = []
mode_types = []
fractions_te = []
fractions_tm = []
for s in tqdm.tqdm(structures, ncols=70):
self.solve(s)
n_effs.append(np.real(self.n_effs))
mode_types.append(self._get_mode_types())
fractions_te.append(self.fraction_te)
fractions_tm.append(self.fraction_tm)
if filename:
self._write_n_effs_to_file(
n_effs, self._modes_directory + filename, sweep_param_list
)
with open(self._modes_directory + "mode_types.dat", "w") as fs:
header = ",".join(
"Mode%i" % i for i, _ in enumerate(mode_types[0])
)
fs.write("# " + header + "\n")
for mt in mode_types:
txt = ",".join("%s %.2f" % pair for pair in mt)
fs.write(txt + "\n")
with open(self._modes_directory + "fraction_te.dat", "w") as fs:
header = "fraction te"
fs.write("# param sweep," + header + "\n")
for param, fte in zip(sweep_param_list, fractions_te):
txt = "%.6f," % param
txt += ",".join("%.2f" % f for f in fte)
fs.write(txt + "\n")
with open(self._modes_directory + "fraction_tm.dat", "w") as fs:
header = "fraction tm"
fs.write("# param sweep," + header + "\n")
for param, ftm in zip(sweep_param_list, fractions_tm):
txt = "%.6f," % param
txt += ",".join("%.2f" % f for f in ftm)
fs.write(txt + "\n")
if plot:
if MPL:
title = "$n_{eff}$ vs %s" % x_label
y_label = "$n_{eff}$"
else:
title = "n_{effs} vs %s" % x_label
y_label = "n_{eff}"
self._plot_n_effs(
self._modes_directory + filename, self._modes_directory + "fraction_te.dat", x_label, y_label, title
)
title = "TE Fraction vs %s" % x_label
self._plot_fraction(
self._modes_directory + "fraction_te.dat",
x_label,
"TE Fraction [%]",
title,
fraction_mode_list,
)
title = "TM Fraction vs %s" % x_label
self._plot_fraction(
self._modes_directory + "fraction_tm.dat",
x_label,
"TM Fraction [%]",
title,
fraction_mode_list,
)
return n_effs
def solve_sweep_wavelength(
self,
structure,
wavelengths,
filename="wavelength_n_effs.dat",
plot=True,
):
"""
Solve for the effective indices of a fixed structure at
different wavelengths.
Args:
structure (Slabs): The target structure to solve
for modes.
wavelengths (list): A list of wavelengths to sweep
over.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'wavelength_n_effs.dat'.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
Returns:
list: A list of the effective indices found for each wavelength.
"""
n_effs = []
for w in tqdm.tqdm(wavelengths, ncols=70):
structure.change_wavelength(w)
self.solve(structure)
n_effs.append(np.real(self.n_effs))
if filename:
self._write_n_effs_to_file(
n_effs, self._modes_directory + filename, wavelengths
)
if plot:
if MPL:
title = "$n_{eff}$ vs Wavelength"
y_label = "$n_{eff}$"
else:
title = "n_{effs} vs Wavelength" % x_label
y_label = "n_{eff}"
self._plot_n_effs(
self._modes_directory + filename,
self._modes_directory + "fraction_te.dat",
"Wavelength",
"n_{eff}",
title,
)
return n_effs
def _get_mode_filename(self, field_name, mode_number, filename):
filename_prefix, filename_ext = os.path.splitext(filename)
filename_mode = (
filename_prefix
+ "_"
+ field_name
+ "_"
+ str(mode_number)
+ filename_ext
)
return filename_mode
def _write_n_effs_to_file(self, n_effs, filename, x_vals=None):
with open(filename, "w") as fs:
fs.write('# Sweep param, mode 1, mode 2, ...\n')
for i, n_eff in enumerate(n_effs):
if x_vals is not None:
line_start = str(x_vals[i]) + ","
else:
line_start = ""
line = ",".join([str(np.round(n, 3)) for n in n_eff])
fs.write(line_start + line + "\n")
return n_effs
def _write_mode_to_file(self, mode, filename):
with open(filename, "w") as fs:
for e in mode[::-1]:
e_str = ",".join([str(v) for v in e])
fs.write(e_str + "\n")
return mode
def _plot_n_effs(self, filename_n_effs, filename_te_fractions, xlabel, ylabel, title):
args = {
"titl": title,
"xlab": xlabel,
"ylab": ylabel,
"filename_data": filename_n_effs,
"filename_frac_te": filename_te_fractions,
"filename_image": None,
"num_modes": len(self.modes),
}
filename_image_prefix, _ = os.path.splitext(filename_n_effs)
filename_image = filename_image_prefix + ".png"
args["filename_image"] = filename_image
if MPL:
data = np.loadtxt(args["filename_data"], delimiter=",").T
plt.clf()
plt.title(title)
plt.xlabel(args["xlab"])
plt.ylabel(args["ylab"])
for i in range(args["num_modes"]):
plt.plot(data[0], data[i + 1], "-o")
plt.savefig(args["filename_image"])
else:
gp.gnuplot(self._path + "n_effs.gpi", args, silent=False)
gp.trim_pad_image(filename_image)
return args
def _plot_fraction(
self, filename_fraction, xlabel, ylabel, title, mode_list=[]
):
if not mode_list:
mode_list = range(len(self.modes))
gp_mode_list = " ".join(str(idx) for idx in mode_list)
args = {
"titl": title,
"xlab": xlabel,
"ylab": ylabel,
"filename_data": filename_fraction,
"filename_image": None,
"mode_list": gp_mode_list,
}
filename_image_prefix, _ = os.path.splitext(filename_fraction)
filename_image = filename_image_prefix + ".png"
args["filename_image"] = filename_image
if MPL:
data = np.loadtxt(args["filename_data"], delimiter=",").T
plt.clf()
plt.title(title)
plt.xlabel(args["xlab"])
plt.ylabel(args["ylab"])
for i, _ in enumerate(self.modes):
plt.plot(data[0], data[i + 1], "-o")
plt.savefig(args["filename_image"])
else:
gp.gnuplot(self._path + "fractions.gpi", args, silent=False)
gp.trim_pad_image(filename_image)
return args
def _plot_mode(
self,
field_name,
mode_number,
filename_mode,
n_eff=None,
subtitle="",
e2_x=0.0,
e2_y=0.0,
ctr_x=0.0,
ctr_y=0.0,
area=None,
wavelength=None,
):
fn = field_name[0] + "_{" + field_name[1:] + "}"
if MPL:
title = r"Mode %i $|%s|$ Profile" % (mode_number, fn)
else:
title = r"Mode %i |%s| Profile" % (mode_number, fn)
if n_eff:
if MPL:
title += r", $n_{eff}$: " + "{:.3f}".format(n_eff.real)
else:
title += ", n_{eff}: " + "{:.3f}".format(n_eff.real)
if wavelength:
if MPL:
title += r", $\lambda = %s " % "{:.3f} \mu$m".format(wavelength)
else:
title += r", $\lambda = %s " % "{:.3f} \mu$m".format(wavelength)
if area:
if MPL:
title += ", $A_%s$: " % field_name[1] + "{:.1f}%".format(area)
else:
title += ", A_%s: " % field_name[1] + "{:.1f}\%".format(area)
if subtitle:
if MPL:
title2 = "\n$%s$" % subtitle
else:
title += "\n{/*0.7 %s}" % subtitle
args = {
"title": title,
"x_pts": self._structure.xc_pts,
"y_pts": self._structure.yc_pts,
"x_min": self._structure.xc_min,
"x_max": self._structure.xc_max,
"y_min": self._structure.yc_min,
"y_max": self._structure.yc_max,
"x_step": self._structure.x_step,
"y_step": self._structure.y_step,
"filename_data": filename_mode,
"filename_image": None,
"e2_x": e2_x,
"e2_y": e2_y,
"ctr_x": ctr_x,
"ctr_y": ctr_y,
}
filename_image_prefix, _ = os.path.splitext(filename_mode)
filename_image = filename_image_prefix + ".png"
args["filename_image"] = filename_image
if MPL:
heatmap = np.loadtxt(filename_mode, delimiter=",")
plt.clf()
plt.suptitle(title)
if subtitle:
plt.rcParams.update({"axes.titlesize": "small"})
plt.title(title2)
plt.xlabel("x")
plt.ylabel("y")
plt.imshow(
np.flipud(heatmap),
extent=(
args["x_min"],
args["x_max"],
args["y_min"],
args["y_max"],
),
aspect="auto",
)
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(self._path + "mode.gpi", args)
gp.trim_pad_image(filename_image)
return args
|
jtambasco/modesolverpy | modesolverpy/mode_solver.py | ModeSolverSemiVectorial.write_modes_to_file | python | def write_modes_to_file(self, filename="mode.dat", plot=True, analyse=True):
modes_directory = "./modes_semi_vec/"
if not os.path.isdir(modes_directory):
os.mkdir(modes_directory)
filename = modes_directory + filename
for i, mode in enumerate(self._ms.modes):
filename_mode = self._get_mode_filename(
self._semi_vectorial_method, i, filename
)
self._write_mode_to_file(np.real(mode), filename_mode)
if plot:
if i == 0 and analyse:
A, centre, sigma_2 = anal.fit_gaussian(
self._structure.xc, self._structure.yc, np.abs(mode)
)
subtitle = (
"E_{max} = %.3f, (x_{max}, y_{max}) = (%.3f, %.3f), MFD_{x} = %.3f, "
"MFD_{y} = %.3f"
) % (A, centre[0], centre[1], sigma_2[0], sigma_2[1])
self._plot_mode(
self._semi_vectorial_method,
i,
filename_mode,
self.n_effs[i],
subtitle,
sigma_2[0],
sigma_2[1],
centre[0],
centre[1],
wavelength=self._structure._wl,
)
else:
self._plot_mode(
self._semi_vectorial_method,
i,
filename_mode,
self.n_effs[i],
wavelength=self._structure._wl,
)
return self.modes | Writes the mode fields to a file and optionally plots them.
Args:
filename (str): The nominal filename to use for the saved
data. The suffix will be automatically be changed to
identifiy each mode number. Default is 'mode.dat'
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
analyse (bool): `True` if an analysis on the fundamental
mode should be performed. The analysis adds to the
plot of the fundamental mode the power mode-field
diameter (MFD) and marks it on the output, and it
marks with a cross the maximum E-field value.
Default is `True`.
Returns:
dict: A dictionary containing the effective indices
and mode field profiles (if solved for). | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/mode_solver.py#L548-L610 | [
"def fit_gaussian(x, y, z_2d, save_fits=False):\n z = z_2d\n\n max_idx = np.unravel_index(z.argmax(), z.shape)\n max_row = max_idx[0] - 1\n max_col = max_idx[1] - 1\n\n z_max_row = z[max_row, :]\n z_max_col = z[:, max_col]\n A = z[max_row, max_col]\n\n p_guess_x = (A, x[max_col], 0.1*(x[-1] ... | class ModeSolverSemiVectorial(_ModeSolver):
"""
A semi-vectorial mode solver object used to
setup and run a mode solving simulation.
Args:
n_eigs (int): The number of eigen-values to solve for.
tol (float): The precision of the eigen-value/eigen-vector
solver. Default is 0.001.
boundary (str): The boundary conditions to use.
This is a string that identifies the type of boundary conditions applied.
The following options are available: 'A' - Hx is antisymmetric, Hy is symmetric,
'S' - Hx is symmetric and, Hy is antisymmetric, and '0' - Hx and Hy are zero
immediately outside of the boundary.
The string identifies all four boundary conditions, in the order:
North, south, east, west. For example, boundary='000A'. Default is '0000'.
mode_profiles (bool): `True if the the mode-profiles should be found, `False`
if only the effective indices should be found.
initial_mode_guess (list): An initial mode guess for the modesolver.
semi_vectorial_method (str): Either 'Ex' or 'Ey'. If 'Ex', the mode solver
will only find TE modes (horizontally polarised to the simulation window),
if 'Ey', the mode solver will find TM modes (vertically polarised to the
simulation window).
"""
def __init__(
self,
n_eigs,
tol=0.001,
boundary="0000",
mode_profiles=True,
initial_mode_guess=None,
semi_vectorial_method="Ex",
):
self._semi_vectorial_method = semi_vectorial_method
_ModeSolver.__init__(
self, n_eigs, tol, boundary, mode_profiles, initial_mode_guess
)
@property
def _modes_directory(self):
modes_directory = "./modes_semi_vec/"
if not os.path.exists(modes_directory):
os.mkdir(modes_directory)
_modes_directory = modes_directory
return _modes_directory
def _solve(self, structure, wavelength):
self._structure = structure
self._ms = ms._ModeSolverSemiVectorial(
wavelength, structure, self._boundary, self._semi_vectorial_method
)
self._ms.solve(
self._n_eigs,
self._tol,
self._mode_profiles,
initial_mode_guess=self._initial_mode_guess,
)
self.n_effs = self._ms.neff
r = {"n_effs": self.n_effs}
if self._mode_profiles:
r["modes"] = self._ms.modes
self._ms.modes[0] = np.real(self._ms.modes[0])
self._initial_mode_guess = np.real(self._ms.modes[0])
self.modes = self._ms.modes
return r
|
jtambasco/modesolverpy | modesolverpy/mode_solver.py | ModeSolverFullyVectorial.write_modes_to_file | python | def write_modes_to_file(
self,
filename="mode.dat",
plot=True,
fields_to_write=("Ex", "Ey", "Ez", "Hx", "Hy", "Hz"),
):
modes_directory = self._modes_directory
# Mode info file.
with open(modes_directory + "mode_info", "w") as fs:
fs.write("# Mode idx, Mode type, % in major direction, n_eff\n")
for i, (n_eff, (mode_type, percentage)) in enumerate(
zip(self.n_effs, self.mode_types)
):
mode_idx = str(i)
line = "%s,%s,%.2f,%.3f" % (
mode_idx,
mode_type,
percentage,
n_eff.real,
)
fs.write(line + "\n")
# Mode field plots.
for i, (mode, areas) in enumerate(zip(self._ms.modes, self.overlaps)):
mode_directory = "%smode_%i/" % (modes_directory, i)
if not os.path.isdir(mode_directory):
os.mkdir(mode_directory)
filename_full = mode_directory + filename
for (field_name, field_profile), area in zip(
mode.fields.items(), areas
):
if field_name in fields_to_write:
filename_mode = self._get_mode_filename(
field_name, i, filename_full
)
self._write_mode_to_file(
np.real(field_profile), filename_mode
)
if plot:
self._plot_mode(
field_name,
i,
filename_mode,
self.n_effs[i],
area=area,
wavelength=self._structure._wl,
)
return self.modes | Writes the mode fields to a file and optionally plots them.
Args:
filename (str): The nominal filename to use for the saved
data. The suffix will be automatically be changed to
identifiy each field and mode number. Default is
'mode.dat'
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
fields_to_write (tuple): A tuple of strings where the
strings can be 'Ex', 'Ey', 'Ez', 'Hx', 'Hy' and 'Hz'
defining what part of the mode should be saved and
plotted. By default, all six components are written
and plotted.
Returns:
dict: A dictionary containing the effective indices
and mode field profiles (if solved for). | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/mode_solver.py#L729-L799 | null | class ModeSolverFullyVectorial(_ModeSolver):
"""
A fully-vectorial mode solver object used to
setup and run a mode solving simulation.
Args:
n_eigs (int): The number of eigen-values to solve for.
tol (float): The precision of the eigen-value/eigen-vector
solver. Default is 0.001.
boundary (str): The boundary conditions to use.
This is a string that identifies the type of boundary conditions applied.
The following options are available: 'A' - Hx is antisymmetric, Hy is symmetric,
'S' - Hx is symmetric and, Hy is antisymmetric, and '0' - Hx and Hy are zero
immediately outside of the boundary.
The string identifies all four boundary conditions, in the order:
North, south, east, west. For example, boundary='000A'. Default is '0000'.
initial_mode_guess (list): An initial mode guess for the modesolver.
initial_n_eff_guess (list): An initial effective index guess for the modesolver.
"""
def __init__(
self,
n_eigs,
tol=0.001,
boundary="0000",
initial_mode_guess=None,
n_eff_guess=None,
):
self.n_effs_te = None
self.n_effs_tm = None
_ModeSolver.__init__(
self, n_eigs, tol, boundary, False, initial_mode_guess, n_eff_guess
)
@property
def _modes_directory(self):
modes_directory = "./modes_full_vec/"
if not os.path.exists(modes_directory):
os.mkdir(modes_directory)
_modes_directory = modes_directory
return _modes_directory
def _solve(self, structure, wavelength):
self._structure = structure
self._ms = ms._ModeSolverVectorial(
wavelength, structure, self._boundary
)
self._ms.solve(
self._n_eigs,
self._tol,
self._n_eff_guess,
initial_mode_guess=self._initial_mode_guess,
)
self.n_effs = self._ms.neff
r = {"n_effs": self.n_effs}
r["modes"] = self.modes = self._ms.modes
self.overlaps, self.fraction_te, self.fraction_tm = self._get_overlaps(
self.modes
)
self.mode_types = self._get_mode_types()
self._initial_mode_guess = None
self.n_effs_te, self.n_effs_tm = self._sort_neffs(self._ms.neff)
return r
def _get_mode_types(self):
mode_types = []
labels = {0: "qTE", 1: "qTM", 2: "qTE/qTM"}
for overlap in self.overlaps:
idx = np.argmax(overlap[0:3])
mode_types.append((labels[idx], np.round(overlap[idx], 2)))
return mode_types
def _sort_neffs(self, n_effs):
mode_types = self._get_mode_types()
n_effs_te = []
n_effs_tm = []
for mt, n_eff in zip(mode_types, n_effs):
if mt[0] == "qTE":
n_effs_te.append(n_eff)
elif mt[0] == "qTM":
n_effs_tm.append(n_eff)
return n_effs_te, n_effs_tm
def _get_overlaps(self, fields):
mode_areas = []
fraction_te = []
fraction_tm = []
for mode in self._ms.modes:
e_fields = (mode.fields["Ex"], mode.fields["Ey"], mode.fields["Ez"])
h_fields = (mode.fields["Hx"], mode.fields["Hy"], mode.fields["Hz"])
areas_e = [np.sum(np.abs(e) ** 2) for e in e_fields]
areas_e /= np.sum(areas_e)
areas_e *= 100
areas_h = [np.sum(np.abs(h) ** 2) for h in h_fields]
areas_h /= np.sum(areas_h)
areas_h *= 100
fraction_te.append(areas_e[0] / (areas_e[0] + areas_e[1]))
fraction_tm.append(areas_e[1] / (areas_e[0] + areas_e[1]))
areas = areas_e.tolist()
areas.extend(areas_h)
mode_areas.append(areas)
return mode_areas, fraction_te, fraction_tm
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.