repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.to_serializable | def to_serializable(self, use_bytes=False, bias_dtype=np.float32,
bytes_type=bytes):
"""Convert the binary quadratic model to a serializable object.
Args:
use_bytes (bool, optional, default=False):
If True, a compact representation representing the biases as bytes is used.
bias_dtype (numpy.dtype, optional, default=numpy.float32):
If `use_bytes` is True, this numpy dtype will be used to
represent the bias values in the serialized format.
bytes_type (class, optional, default=bytes):
This class will be used to wrap the bytes objects in the
serialization if `use_bytes` is true. Useful for when using
Python 2 and using BSON encoding, which will not accept the raw
`bytes` type, so `bson.Binary` can be used instead.
Returns:
dict: An object that can be serialized.
Examples:
Encode using JSON
>>> import dimod
>>> import json
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> s = json.dumps(bqm.to_serializable())
Encode using BSON_ in python 3.5+
>>> import dimod
>>> import bson
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> doc = bqm.to_serializable(use_bytes=True)
>>> b = bson.BSON.encode(doc) # doctest: +SKIP
Encode using BSON in python 2.7. Because :class:`bytes` is an alias for :class:`str`,
we need to signal to the encoder that it should encode the biases and labels as binary
data.
>>> import dimod
>>> import bson
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> doc = bqm.to_serializable(use_bytes=True, bytes_type=bson.Binary)
>>> b = bson.BSON.encode(doc) # doctest: +SKIP
See also:
:meth:`~.BinaryQuadraticModel.from_serializable`
:func:`json.dumps`, :func:`json.dump` JSON encoding functions
:meth:`bson.BSON.encode` BSON encoding method
.. _BSON: http://bsonspec.org/
"""
from dimod.package_info import __version__
schema_version = "2.0.0"
try:
variables = sorted(self.variables)
except TypeError:
# sorting unlike types in py3
variables = list(self.variables)
num_variables = len(variables)
# when doing byte encoding we can use less space depending on the
# total number of variables
index_dtype = np.uint16 if num_variables <= 2**16 else np.uint32
ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(
dtype=bias_dtype,
index_dtype=index_dtype,
sort_indices=True,
variable_order=variables)
doc = {"basetype": "BinaryQuadraticModel",
"type": type(self).__name__,
"version": {"dimod": __version__,
"bqm_schema": schema_version},
"variable_labels": variables,
"variable_type": self.vartype.name,
"info": self.info,
"offset": float(offset),
"use_bytes": bool(use_bytes)
}
if use_bytes:
doc.update({'linear_biases': array2bytes(ldata, bytes_type=bytes_type),
'quadratic_biases': array2bytes(qdata, bytes_type=bytes_type),
'quadratic_head': array2bytes(irow, bytes_type=bytes_type),
'quadratic_tail': array2bytes(icol, bytes_type=bytes_type)})
else:
doc.update({'linear_biases': ldata.tolist(),
'quadratic_biases': qdata.tolist(),
'quadratic_head': irow.tolist(),
'quadratic_tail': icol.tolist()})
return doc | python | def to_serializable(self, use_bytes=False, bias_dtype=np.float32,
bytes_type=bytes):
"""Convert the binary quadratic model to a serializable object.
Args:
use_bytes (bool, optional, default=False):
If True, a compact representation representing the biases as bytes is used.
bias_dtype (numpy.dtype, optional, default=numpy.float32):
If `use_bytes` is True, this numpy dtype will be used to
represent the bias values in the serialized format.
bytes_type (class, optional, default=bytes):
This class will be used to wrap the bytes objects in the
serialization if `use_bytes` is true. Useful for when using
Python 2 and using BSON encoding, which will not accept the raw
`bytes` type, so `bson.Binary` can be used instead.
Returns:
dict: An object that can be serialized.
Examples:
Encode using JSON
>>> import dimod
>>> import json
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> s = json.dumps(bqm.to_serializable())
Encode using BSON_ in python 3.5+
>>> import dimod
>>> import bson
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> doc = bqm.to_serializable(use_bytes=True)
>>> b = bson.BSON.encode(doc) # doctest: +SKIP
Encode using BSON in python 2.7. Because :class:`bytes` is an alias for :class:`str`,
we need to signal to the encoder that it should encode the biases and labels as binary
data.
>>> import dimod
>>> import bson
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> doc = bqm.to_serializable(use_bytes=True, bytes_type=bson.Binary)
>>> b = bson.BSON.encode(doc) # doctest: +SKIP
See also:
:meth:`~.BinaryQuadraticModel.from_serializable`
:func:`json.dumps`, :func:`json.dump` JSON encoding functions
:meth:`bson.BSON.encode` BSON encoding method
.. _BSON: http://bsonspec.org/
"""
from dimod.package_info import __version__
schema_version = "2.0.0"
try:
variables = sorted(self.variables)
except TypeError:
# sorting unlike types in py3
variables = list(self.variables)
num_variables = len(variables)
# when doing byte encoding we can use less space depending on the
# total number of variables
index_dtype = np.uint16 if num_variables <= 2**16 else np.uint32
ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(
dtype=bias_dtype,
index_dtype=index_dtype,
sort_indices=True,
variable_order=variables)
doc = {"basetype": "BinaryQuadraticModel",
"type": type(self).__name__,
"version": {"dimod": __version__,
"bqm_schema": schema_version},
"variable_labels": variables,
"variable_type": self.vartype.name,
"info": self.info,
"offset": float(offset),
"use_bytes": bool(use_bytes)
}
if use_bytes:
doc.update({'linear_biases': array2bytes(ldata, bytes_type=bytes_type),
'quadratic_biases': array2bytes(qdata, bytes_type=bytes_type),
'quadratic_head': array2bytes(irow, bytes_type=bytes_type),
'quadratic_tail': array2bytes(icol, bytes_type=bytes_type)})
else:
doc.update({'linear_biases': ldata.tolist(),
'quadratic_biases': qdata.tolist(),
'quadratic_head': irow.tolist(),
'quadratic_tail': icol.tolist()})
return doc | [
"def",
"to_serializable",
"(",
"self",
",",
"use_bytes",
"=",
"False",
",",
"bias_dtype",
"=",
"np",
".",
"float32",
",",
"bytes_type",
"=",
"bytes",
")",
":",
"from",
"dimod",
".",
"package_info",
"import",
"__version__",
"schema_version",
"=",
"\"2.0.0\"",
... | Convert the binary quadratic model to a serializable object.
Args:
use_bytes (bool, optional, default=False):
If True, a compact representation representing the biases as bytes is used.
bias_dtype (numpy.dtype, optional, default=numpy.float32):
If `use_bytes` is True, this numpy dtype will be used to
represent the bias values in the serialized format.
bytes_type (class, optional, default=bytes):
This class will be used to wrap the bytes objects in the
serialization if `use_bytes` is true. Useful for when using
Python 2 and using BSON encoding, which will not accept the raw
`bytes` type, so `bson.Binary` can be used instead.
Returns:
dict: An object that can be serialized.
Examples:
Encode using JSON
>>> import dimod
>>> import json
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> s = json.dumps(bqm.to_serializable())
Encode using BSON_ in python 3.5+
>>> import dimod
>>> import bson
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> doc = bqm.to_serializable(use_bytes=True)
>>> b = bson.BSON.encode(doc) # doctest: +SKIP
Encode using BSON in python 2.7. Because :class:`bytes` is an alias for :class:`str`,
we need to signal to the encoder that it should encode the biases and labels as binary
data.
>>> import dimod
>>> import bson
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> doc = bqm.to_serializable(use_bytes=True, bytes_type=bson.Binary)
>>> b = bson.BSON.encode(doc) # doctest: +SKIP
See also:
:meth:`~.BinaryQuadraticModel.from_serializable`
:func:`json.dumps`, :func:`json.dump` JSON encoding functions
:meth:`bson.BSON.encode` BSON encoding method
.. _BSON: http://bsonspec.org/ | [
"Convert",
"the",
"binary",
"quadratic",
"model",
"to",
"a",
"serializable",
"object",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L1654-L1758 | train | 212,600 |
dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.from_serializable | def from_serializable(cls, obj):
"""Deserialize a binary quadratic model.
Args:
obj (dict):
A binary quadratic model serialized by :meth:`~.BinaryQuadraticModel.to_serializable`.
Returns:
:obj:`.BinaryQuadraticModel`
Examples:
Encode and decode using JSON
>>> import dimod
>>> import json
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> s = json.dumps(bqm.to_serializable())
>>> new_bqm = dimod.BinaryQuadraticModel.from_serializable(json.loads(s))
See also:
:meth:`~.BinaryQuadraticModel.to_serializable`
:func:`json.loads`, :func:`json.load` JSON deserialization functions
"""
if obj.get("version", {"bqm_schema": "1.0.0"})["bqm_schema"] != "2.0.0":
return cls._from_serializable_v1(obj)
variables = [tuple(v) if isinstance(v, list) else v
for v in obj["variable_labels"]]
if obj["use_bytes"]:
ldata = bytes2array(obj["linear_biases"])
qdata = bytes2array(obj["quadratic_biases"])
irow = bytes2array(obj["quadratic_head"])
icol = bytes2array(obj["quadratic_tail"])
else:
ldata = obj["linear_biases"]
qdata = obj["quadratic_biases"]
irow = obj["quadratic_head"]
icol = obj["quadratic_tail"]
offset = obj["offset"]
vartype = obj["variable_type"]
bqm = cls.from_numpy_vectors(ldata,
(irow, icol, qdata),
offset,
str(vartype), # handle unicode for py2
variable_order=variables)
bqm.info.update(obj["info"])
return bqm | python | def from_serializable(cls, obj):
"""Deserialize a binary quadratic model.
Args:
obj (dict):
A binary quadratic model serialized by :meth:`~.BinaryQuadraticModel.to_serializable`.
Returns:
:obj:`.BinaryQuadraticModel`
Examples:
Encode and decode using JSON
>>> import dimod
>>> import json
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> s = json.dumps(bqm.to_serializable())
>>> new_bqm = dimod.BinaryQuadraticModel.from_serializable(json.loads(s))
See also:
:meth:`~.BinaryQuadraticModel.to_serializable`
:func:`json.loads`, :func:`json.load` JSON deserialization functions
"""
if obj.get("version", {"bqm_schema": "1.0.0"})["bqm_schema"] != "2.0.0":
return cls._from_serializable_v1(obj)
variables = [tuple(v) if isinstance(v, list) else v
for v in obj["variable_labels"]]
if obj["use_bytes"]:
ldata = bytes2array(obj["linear_biases"])
qdata = bytes2array(obj["quadratic_biases"])
irow = bytes2array(obj["quadratic_head"])
icol = bytes2array(obj["quadratic_tail"])
else:
ldata = obj["linear_biases"]
qdata = obj["quadratic_biases"]
irow = obj["quadratic_head"]
icol = obj["quadratic_tail"]
offset = obj["offset"]
vartype = obj["variable_type"]
bqm = cls.from_numpy_vectors(ldata,
(irow, icol, qdata),
offset,
str(vartype), # handle unicode for py2
variable_order=variables)
bqm.info.update(obj["info"])
return bqm | [
"def",
"from_serializable",
"(",
"cls",
",",
"obj",
")",
":",
"if",
"obj",
".",
"get",
"(",
"\"version\"",
",",
"{",
"\"bqm_schema\"",
":",
"\"1.0.0\"",
"}",
")",
"[",
"\"bqm_schema\"",
"]",
"!=",
"\"2.0.0\"",
":",
"return",
"cls",
".",
"_from_serializable... | Deserialize a binary quadratic model.
Args:
obj (dict):
A binary quadratic model serialized by :meth:`~.BinaryQuadraticModel.to_serializable`.
Returns:
:obj:`.BinaryQuadraticModel`
Examples:
Encode and decode using JSON
>>> import dimod
>>> import json
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> s = json.dumps(bqm.to_serializable())
>>> new_bqm = dimod.BinaryQuadraticModel.from_serializable(json.loads(s))
See also:
:meth:`~.BinaryQuadraticModel.to_serializable`
:func:`json.loads`, :func:`json.load` JSON deserialization functions | [
"Deserialize",
"a",
"binary",
"quadratic",
"model",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L1798-L1852 | train | 212,601 |
dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.to_networkx_graph | def to_networkx_graph(self, node_attribute_name='bias', edge_attribute_name='bias'):
"""Convert a binary quadratic model to NetworkX graph format.
Args:
node_attribute_name (hashable, optional, default='bias'):
Attribute name for linear biases.
edge_attribute_name (hashable, optional, default='bias'):
Attribute name for quadratic biases.
Returns:
:class:`networkx.Graph`: A NetworkX graph with biases stored as
node/edge attributes.
Examples:
This example converts a binary quadratic model to a NetworkX graph, using first
the default attribute name for quadratic biases then "weight".
>>> import networkx as nx
>>> bqm = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
... {(0, 1): .5, (1, 2): 1.5},
... 1.4,
... dimod.SPIN)
>>> BQM = bqm.to_networkx_graph()
>>> BQM[0][1]['bias']
0.5
>>> BQM.node[0]['bias']
1
>>> BQM_w = bqm.to_networkx_graph(edge_attribute_name='weight')
>>> BQM_w[0][1]['weight']
0.5
"""
import networkx as nx
BQM = nx.Graph()
# add the linear biases
BQM.add_nodes_from(((v, {node_attribute_name: bias, 'vartype': self.vartype})
for v, bias in iteritems(self.linear)))
# add the quadratic biases
BQM.add_edges_from(((u, v, {edge_attribute_name: bias}) for (u, v), bias in iteritems(self.quadratic)))
# set the offset and vartype properties for the graph
BQM.offset = self.offset
BQM.vartype = self.vartype
return BQM | python | def to_networkx_graph(self, node_attribute_name='bias', edge_attribute_name='bias'):
"""Convert a binary quadratic model to NetworkX graph format.
Args:
node_attribute_name (hashable, optional, default='bias'):
Attribute name for linear biases.
edge_attribute_name (hashable, optional, default='bias'):
Attribute name for quadratic biases.
Returns:
:class:`networkx.Graph`: A NetworkX graph with biases stored as
node/edge attributes.
Examples:
This example converts a binary quadratic model to a NetworkX graph, using first
the default attribute name for quadratic biases then "weight".
>>> import networkx as nx
>>> bqm = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
... {(0, 1): .5, (1, 2): 1.5},
... 1.4,
... dimod.SPIN)
>>> BQM = bqm.to_networkx_graph()
>>> BQM[0][1]['bias']
0.5
>>> BQM.node[0]['bias']
1
>>> BQM_w = bqm.to_networkx_graph(edge_attribute_name='weight')
>>> BQM_w[0][1]['weight']
0.5
"""
import networkx as nx
BQM = nx.Graph()
# add the linear biases
BQM.add_nodes_from(((v, {node_attribute_name: bias, 'vartype': self.vartype})
for v, bias in iteritems(self.linear)))
# add the quadratic biases
BQM.add_edges_from(((u, v, {edge_attribute_name: bias}) for (u, v), bias in iteritems(self.quadratic)))
# set the offset and vartype properties for the graph
BQM.offset = self.offset
BQM.vartype = self.vartype
return BQM | [
"def",
"to_networkx_graph",
"(",
"self",
",",
"node_attribute_name",
"=",
"'bias'",
",",
"edge_attribute_name",
"=",
"'bias'",
")",
":",
"import",
"networkx",
"as",
"nx",
"BQM",
"=",
"nx",
".",
"Graph",
"(",
")",
"# add the linear biases",
"BQM",
".",
"add_nod... | Convert a binary quadratic model to NetworkX graph format.
Args:
node_attribute_name (hashable, optional, default='bias'):
Attribute name for linear biases.
edge_attribute_name (hashable, optional, default='bias'):
Attribute name for quadratic biases.
Returns:
:class:`networkx.Graph`: A NetworkX graph with biases stored as
node/edge attributes.
Examples:
This example converts a binary quadratic model to a NetworkX graph, using first
the default attribute name for quadratic biases then "weight".
>>> import networkx as nx
>>> bqm = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
... {(0, 1): .5, (1, 2): 1.5},
... 1.4,
... dimod.SPIN)
>>> BQM = bqm.to_networkx_graph()
>>> BQM[0][1]['bias']
0.5
>>> BQM.node[0]['bias']
1
>>> BQM_w = bqm.to_networkx_graph(edge_attribute_name='weight')
>>> BQM_w[0][1]['weight']
0.5 | [
"Convert",
"a",
"binary",
"quadratic",
"model",
"to",
"NetworkX",
"graph",
"format",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L1854-L1902 | train | 212,602 |
dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.from_networkx_graph | def from_networkx_graph(cls, G, vartype=None, node_attribute_name='bias',
edge_attribute_name='bias'):
"""Create a binary quadratic model from a NetworkX graph.
Args:
G (:obj:`networkx.Graph`):
A NetworkX graph with biases stored as node/edge attributes.
vartype (:class:`.Vartype`/str/set, optional):
Variable type for the binary quadratic model. Accepted input
values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
If not provided, the `G` should have a vartype attribute. If
`vartype` is provided and `G.vartype` exists then the argument
overrides the property.
node_attribute_name (hashable, optional, default='bias'):
Attribute name for linear biases. If the node does not have a
matching attribute then the bias defaults to 0.
edge_attribute_name (hashable, optional, default='bias'):
Attribute name for quadratic biases. If the edge does not have a
matching attribute then the bias defaults to 0.
Returns:
:obj:`.BinaryQuadraticModel`
Examples:
>>> import networkx as nx
...
>>> G = nx.Graph()
>>> G.add_node('a', bias=.5)
>>> G.add_edge('a', 'b', bias=-1)
>>> bqm = dimod.BinaryQuadraticModel.from_networkx_graph(G, 'SPIN')
>>> bqm.adj['a']['b']
-1
"""
if vartype is None:
if not hasattr(G, 'vartype'):
msg = ("either 'vartype' argument must be provided or "
"the given graph should have a vartype attribute.")
raise ValueError(msg)
vartype = G.vartype
linear = G.nodes(data=node_attribute_name, default=0)
quadratic = G.edges(data=edge_attribute_name, default=0)
offset = getattr(G, 'offset', 0)
return cls(linear, quadratic, offset, vartype) | python | def from_networkx_graph(cls, G, vartype=None, node_attribute_name='bias',
edge_attribute_name='bias'):
"""Create a binary quadratic model from a NetworkX graph.
Args:
G (:obj:`networkx.Graph`):
A NetworkX graph with biases stored as node/edge attributes.
vartype (:class:`.Vartype`/str/set, optional):
Variable type for the binary quadratic model. Accepted input
values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
If not provided, the `G` should have a vartype attribute. If
`vartype` is provided and `G.vartype` exists then the argument
overrides the property.
node_attribute_name (hashable, optional, default='bias'):
Attribute name for linear biases. If the node does not have a
matching attribute then the bias defaults to 0.
edge_attribute_name (hashable, optional, default='bias'):
Attribute name for quadratic biases. If the edge does not have a
matching attribute then the bias defaults to 0.
Returns:
:obj:`.BinaryQuadraticModel`
Examples:
>>> import networkx as nx
...
>>> G = nx.Graph()
>>> G.add_node('a', bias=.5)
>>> G.add_edge('a', 'b', bias=-1)
>>> bqm = dimod.BinaryQuadraticModel.from_networkx_graph(G, 'SPIN')
>>> bqm.adj['a']['b']
-1
"""
if vartype is None:
if not hasattr(G, 'vartype'):
msg = ("either 'vartype' argument must be provided or "
"the given graph should have a vartype attribute.")
raise ValueError(msg)
vartype = G.vartype
linear = G.nodes(data=node_attribute_name, default=0)
quadratic = G.edges(data=edge_attribute_name, default=0)
offset = getattr(G, 'offset', 0)
return cls(linear, quadratic, offset, vartype) | [
"def",
"from_networkx_graph",
"(",
"cls",
",",
"G",
",",
"vartype",
"=",
"None",
",",
"node_attribute_name",
"=",
"'bias'",
",",
"edge_attribute_name",
"=",
"'bias'",
")",
":",
"if",
"vartype",
"is",
"None",
":",
"if",
"not",
"hasattr",
"(",
"G",
",",
"'... | Create a binary quadratic model from a NetworkX graph.
Args:
G (:obj:`networkx.Graph`):
A NetworkX graph with biases stored as node/edge attributes.
vartype (:class:`.Vartype`/str/set, optional):
Variable type for the binary quadratic model. Accepted input
values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
If not provided, the `G` should have a vartype attribute. If
`vartype` is provided and `G.vartype` exists then the argument
overrides the property.
node_attribute_name (hashable, optional, default='bias'):
Attribute name for linear biases. If the node does not have a
matching attribute then the bias defaults to 0.
edge_attribute_name (hashable, optional, default='bias'):
Attribute name for quadratic biases. If the edge does not have a
matching attribute then the bias defaults to 0.
Returns:
:obj:`.BinaryQuadraticModel`
Examples:
>>> import networkx as nx
...
>>> G = nx.Graph()
>>> G.add_node('a', bias=.5)
>>> G.add_edge('a', 'b', bias=-1)
>>> bqm = dimod.BinaryQuadraticModel.from_networkx_graph(G, 'SPIN')
>>> bqm.adj['a']['b']
-1 | [
"Create",
"a",
"binary",
"quadratic",
"model",
"from",
"a",
"NetworkX",
"graph",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L1905-L1958 | train | 212,603 |
dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.to_ising | def to_ising(self):
"""Converts a binary quadratic model to Ising format.
If the binary quadratic model's vartype is not :class:`.Vartype.SPIN`,
values are converted.
Returns:
tuple: 3-tuple of form (`linear`, `quadratic`, `offset`), where `linear`
is a dict of linear biases, `quadratic` is a dict of quadratic biases,
and `offset` is a number that represents the constant offset of the
binary quadratic model.
Examples:
This example converts a binary quadratic model to an Ising problem.
>>> import dimod
>>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
... {(0, 1): .5, (1, 2): 1.5},
... 1.4,
... dimod.SPIN)
>>> model.to_ising() # doctest: +SKIP
({0: 1, 1: -1, 2: 0.5}, {(0, 1): 0.5, (1, 2): 1.5}, 1.4)
"""
# cast to a dict
return dict(self.spin.linear), dict(self.spin.quadratic), self.spin.offset | python | def to_ising(self):
"""Converts a binary quadratic model to Ising format.
If the binary quadratic model's vartype is not :class:`.Vartype.SPIN`,
values are converted.
Returns:
tuple: 3-tuple of form (`linear`, `quadratic`, `offset`), where `linear`
is a dict of linear biases, `quadratic` is a dict of quadratic biases,
and `offset` is a number that represents the constant offset of the
binary quadratic model.
Examples:
This example converts a binary quadratic model to an Ising problem.
>>> import dimod
>>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
... {(0, 1): .5, (1, 2): 1.5},
... 1.4,
... dimod.SPIN)
>>> model.to_ising() # doctest: +SKIP
({0: 1, 1: -1, 2: 0.5}, {(0, 1): 0.5, (1, 2): 1.5}, 1.4)
"""
# cast to a dict
return dict(self.spin.linear), dict(self.spin.quadratic), self.spin.offset | [
"def",
"to_ising",
"(",
"self",
")",
":",
"# cast to a dict",
"return",
"dict",
"(",
"self",
".",
"spin",
".",
"linear",
")",
",",
"dict",
"(",
"self",
".",
"spin",
".",
"quadratic",
")",
",",
"self",
".",
"spin",
".",
"offset"
] | Converts a binary quadratic model to Ising format.
If the binary quadratic model's vartype is not :class:`.Vartype.SPIN`,
values are converted.
Returns:
tuple: 3-tuple of form (`linear`, `quadratic`, `offset`), where `linear`
is a dict of linear biases, `quadratic` is a dict of quadratic biases,
and `offset` is a number that represents the constant offset of the
binary quadratic model.
Examples:
This example converts a binary quadratic model to an Ising problem.
>>> import dimod
>>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
... {(0, 1): .5, (1, 2): 1.5},
... 1.4,
... dimod.SPIN)
>>> model.to_ising() # doctest: +SKIP
({0: 1, 1: -1, 2: 0.5}, {(0, 1): 0.5, (1, 2): 1.5}, 1.4) | [
"Converts",
"a",
"binary",
"quadratic",
"model",
"to",
"Ising",
"format",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L1960-L1985 | train | 212,604 |
dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.from_ising | def from_ising(cls, h, J, offset=0.0):
"""Create a binary quadratic model from an Ising problem.
Args:
h (dict/list):
Linear biases of the Ising problem. If a dict, should be of the
form `{v: bias, ...}` where v is a spin-valued variable and `bias`
is its associated bias. If a list, it is treated as a list of
biases where the indices are the variable labels.
J (dict[(variable, variable), bias]):
Quadratic biases of the Ising problem.
offset (optional, default=0.0):
Constant offset applied to the model.
Returns:
:class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to
:class:`.Vartype.SPIN`.
Examples:
This example creates a binary quadratic model from an Ising problem.
>>> import dimod
>>> h = {1: 1, 2: 2, 3: 3, 4: 4}
>>> J = {(1, 2): 12, (1, 3): 13, (1, 4): 14,
... (2, 3): 23, (2, 4): 24,
... (3, 4): 34}
>>> model = dimod.BinaryQuadraticModel.from_ising(h, J, offset = 0.0)
>>> model # doctest: +SKIP
BinaryQuadraticModel({1: 1, 2: 2, 3: 3, 4: 4}, {(1, 2): 12, (1, 3): 13, (1, 4): 14, (2, 3): 23, (3, 4): 34, (2, 4): 24}, 0.0, Vartype.SPIN)
"""
if isinstance(h, abc.Sequence):
h = dict(enumerate(h))
return cls(h, J, offset, Vartype.SPIN) | python | def from_ising(cls, h, J, offset=0.0):
"""Create a binary quadratic model from an Ising problem.
Args:
h (dict/list):
Linear biases of the Ising problem. If a dict, should be of the
form `{v: bias, ...}` where v is a spin-valued variable and `bias`
is its associated bias. If a list, it is treated as a list of
biases where the indices are the variable labels.
J (dict[(variable, variable), bias]):
Quadratic biases of the Ising problem.
offset (optional, default=0.0):
Constant offset applied to the model.
Returns:
:class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to
:class:`.Vartype.SPIN`.
Examples:
This example creates a binary quadratic model from an Ising problem.
>>> import dimod
>>> h = {1: 1, 2: 2, 3: 3, 4: 4}
>>> J = {(1, 2): 12, (1, 3): 13, (1, 4): 14,
... (2, 3): 23, (2, 4): 24,
... (3, 4): 34}
>>> model = dimod.BinaryQuadraticModel.from_ising(h, J, offset = 0.0)
>>> model # doctest: +SKIP
BinaryQuadraticModel({1: 1, 2: 2, 3: 3, 4: 4}, {(1, 2): 12, (1, 3): 13, (1, 4): 14, (2, 3): 23, (3, 4): 34, (2, 4): 24}, 0.0, Vartype.SPIN)
"""
if isinstance(h, abc.Sequence):
h = dict(enumerate(h))
return cls(h, J, offset, Vartype.SPIN) | [
"def",
"from_ising",
"(",
"cls",
",",
"h",
",",
"J",
",",
"offset",
"=",
"0.0",
")",
":",
"if",
"isinstance",
"(",
"h",
",",
"abc",
".",
"Sequence",
")",
":",
"h",
"=",
"dict",
"(",
"enumerate",
"(",
"h",
")",
")",
"return",
"cls",
"(",
"h",
... | Create a binary quadratic model from an Ising problem.
Args:
h (dict/list):
Linear biases of the Ising problem. If a dict, should be of the
form `{v: bias, ...}` where v is a spin-valued variable and `bias`
is its associated bias. If a list, it is treated as a list of
biases where the indices are the variable labels.
J (dict[(variable, variable), bias]):
Quadratic biases of the Ising problem.
offset (optional, default=0.0):
Constant offset applied to the model.
Returns:
:class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to
:class:`.Vartype.SPIN`.
Examples:
This example creates a binary quadratic model from an Ising problem.
>>> import dimod
>>> h = {1: 1, 2: 2, 3: 3, 4: 4}
>>> J = {(1, 2): 12, (1, 3): 13, (1, 4): 14,
... (2, 3): 23, (2, 4): 24,
... (3, 4): 34}
>>> model = dimod.BinaryQuadraticModel.from_ising(h, J, offset = 0.0)
>>> model # doctest: +SKIP
BinaryQuadraticModel({1: 1, 2: 2, 3: 3, 4: 4}, {(1, 2): 12, (1, 3): 13, (1, 4): 14, (2, 3): 23, (3, 4): 34, (2, 4): 24}, 0.0, Vartype.SPIN) | [
"Create",
"a",
"binary",
"quadratic",
"model",
"from",
"an",
"Ising",
"problem",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L1988-L2025 | train | 212,605 |
dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.to_qubo | def to_qubo(self):
"""Convert a binary quadratic model to QUBO format.
If the binary quadratic model's vartype is not :class:`.Vartype.BINARY`,
values are converted.
Returns:
tuple: 2-tuple of form (`biases`, `offset`), where `biases` is a dict
in which keys are pairs of variables and values are the associated linear or
quadratic bias and `offset` is a number that represents the constant offset
of the binary quadratic model.
Examples:
This example converts a binary quadratic model with spin variables to QUBO format
with binary variables.
>>> import dimod
>>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
... {(0, 1): .5, (1, 2): 1.5},
... 1.4,
... dimod.SPIN)
>>> model.to_qubo() # doctest: +SKIP
({(0, 0): 1.0, (0, 1): 2.0, (1, 1): -6.0, (1, 2): 6.0, (2, 2): -2.0}, 2.9)
"""
qubo = dict(self.binary.quadratic)
qubo.update(((v, v), bias) for v, bias in iteritems(self.binary.linear))
return qubo, self.binary.offset | python | def to_qubo(self):
"""Convert a binary quadratic model to QUBO format.
If the binary quadratic model's vartype is not :class:`.Vartype.BINARY`,
values are converted.
Returns:
tuple: 2-tuple of form (`biases`, `offset`), where `biases` is a dict
in which keys are pairs of variables and values are the associated linear or
quadratic bias and `offset` is a number that represents the constant offset
of the binary quadratic model.
Examples:
This example converts a binary quadratic model with spin variables to QUBO format
with binary variables.
>>> import dimod
>>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
... {(0, 1): .5, (1, 2): 1.5},
... 1.4,
... dimod.SPIN)
>>> model.to_qubo() # doctest: +SKIP
({(0, 0): 1.0, (0, 1): 2.0, (1, 1): -6.0, (1, 2): 6.0, (2, 2): -2.0}, 2.9)
"""
qubo = dict(self.binary.quadratic)
qubo.update(((v, v), bias) for v, bias in iteritems(self.binary.linear))
return qubo, self.binary.offset | [
"def",
"to_qubo",
"(",
"self",
")",
":",
"qubo",
"=",
"dict",
"(",
"self",
".",
"binary",
".",
"quadratic",
")",
"qubo",
".",
"update",
"(",
"(",
"(",
"v",
",",
"v",
")",
",",
"bias",
")",
"for",
"v",
",",
"bias",
"in",
"iteritems",
"(",
"self"... | Convert a binary quadratic model to QUBO format.
If the binary quadratic model's vartype is not :class:`.Vartype.BINARY`,
values are converted.
Returns:
tuple: 2-tuple of form (`biases`, `offset`), where `biases` is a dict
in which keys are pairs of variables and values are the associated linear or
quadratic bias and `offset` is a number that represents the constant offset
of the binary quadratic model.
Examples:
This example converts a binary quadratic model with spin variables to QUBO format
with binary variables.
>>> import dimod
>>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
... {(0, 1): .5, (1, 2): 1.5},
... 1.4,
... dimod.SPIN)
>>> model.to_qubo() # doctest: +SKIP
({(0, 0): 1.0, (0, 1): 2.0, (1, 1): -6.0, (1, 2): 6.0, (2, 2): -2.0}, 2.9) | [
"Convert",
"a",
"binary",
"quadratic",
"model",
"to",
"QUBO",
"format",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L2027-L2054 | train | 212,606 |
dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.from_qubo | def from_qubo(cls, Q, offset=0.0):
"""Create a binary quadratic model from a QUBO model.
Args:
Q (dict):
Coefficients of a quadratic unconstrained binary optimization
(QUBO) problem. Should be a dict of the form `{(u, v): bias, ...}`
where `u`, `v`, are binary-valued variables and `bias` is their
associated coefficient.
offset (optional, default=0.0):
Constant offset applied to the model.
Returns:
:class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to
:class:`.Vartype.BINARY`.
Examples:
This example creates a binary quadratic model from a QUBO model.
>>> import dimod
>>> Q = {(0, 0): -1, (1, 1): -1, (0, 1): 2}
>>> model = dimod.BinaryQuadraticModel.from_qubo(Q, offset = 0.0)
>>> model.linear # doctest: +SKIP
{0: -1, 1: -1}
>>> model.vartype
<Vartype.BINARY: frozenset({0, 1})>
"""
linear = {}
quadratic = {}
for (u, v), bias in iteritems(Q):
if u == v:
linear[u] = bias
else:
quadratic[(u, v)] = bias
return cls(linear, quadratic, offset, Vartype.BINARY) | python | def from_qubo(cls, Q, offset=0.0):
"""Create a binary quadratic model from a QUBO model.
Args:
Q (dict):
Coefficients of a quadratic unconstrained binary optimization
(QUBO) problem. Should be a dict of the form `{(u, v): bias, ...}`
where `u`, `v`, are binary-valued variables and `bias` is their
associated coefficient.
offset (optional, default=0.0):
Constant offset applied to the model.
Returns:
:class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to
:class:`.Vartype.BINARY`.
Examples:
This example creates a binary quadratic model from a QUBO model.
>>> import dimod
>>> Q = {(0, 0): -1, (1, 1): -1, (0, 1): 2}
>>> model = dimod.BinaryQuadraticModel.from_qubo(Q, offset = 0.0)
>>> model.linear # doctest: +SKIP
{0: -1, 1: -1}
>>> model.vartype
<Vartype.BINARY: frozenset({0, 1})>
"""
linear = {}
quadratic = {}
for (u, v), bias in iteritems(Q):
if u == v:
linear[u] = bias
else:
quadratic[(u, v)] = bias
return cls(linear, quadratic, offset, Vartype.BINARY) | [
"def",
"from_qubo",
"(",
"cls",
",",
"Q",
",",
"offset",
"=",
"0.0",
")",
":",
"linear",
"=",
"{",
"}",
"quadratic",
"=",
"{",
"}",
"for",
"(",
"u",
",",
"v",
")",
",",
"bias",
"in",
"iteritems",
"(",
"Q",
")",
":",
"if",
"u",
"==",
"v",
":... | Create a binary quadratic model from a QUBO model.
Args:
Q (dict):
Coefficients of a quadratic unconstrained binary optimization
(QUBO) problem. Should be a dict of the form `{(u, v): bias, ...}`
where `u`, `v`, are binary-valued variables and `bias` is their
associated coefficient.
offset (optional, default=0.0):
Constant offset applied to the model.
Returns:
:class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to
:class:`.Vartype.BINARY`.
Examples:
This example creates a binary quadratic model from a QUBO model.
>>> import dimod
>>> Q = {(0, 0): -1, (1, 1): -1, (0, 1): 2}
>>> model = dimod.BinaryQuadraticModel.from_qubo(Q, offset = 0.0)
>>> model.linear # doctest: +SKIP
{0: -1, 1: -1}
>>> model.vartype
<Vartype.BINARY: frozenset({0, 1})> | [
"Create",
"a",
"binary",
"quadratic",
"model",
"from",
"a",
"QUBO",
"model",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L2057-L2094 | train | 212,607 |
dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.to_numpy_matrix | def to_numpy_matrix(self, variable_order=None):
"""Convert a binary quadratic model to NumPy 2D array.
Args:
variable_order (list, optional):
If provided, indexes the rows/columns of the NumPy array. If `variable_order` includes
any variables not in the binary quadratic model, these are added to the NumPy array.
Returns:
:class:`numpy.ndarray`: The binary quadratic model as a NumPy 2D array. Note that the
binary quadratic model is converted to :class:`~.Vartype.BINARY` vartype.
Notes:
The matrix representation of a binary quadratic model only makes sense for binary models.
For a binary sample x, the energy of the model is given by:
.. math::
E(x) = x^T Q x
The offset is dropped when converting to a NumPy array.
Examples:
This example converts a binary quadratic model to NumPy array format while
ordering variables and adding one ('d').
>>> import dimod
>>> import numpy as np
...
>>> model = dimod.BinaryQuadraticModel({'a': 1, 'b': -1, 'c': .5},
... {('a', 'b'): .5, ('b', 'c'): 1.5},
... 1.4,
... dimod.BINARY)
>>> model.to_numpy_matrix(variable_order=['d', 'c', 'b', 'a'])
array([[ 0. , 0. , 0. , 0. ],
[ 0. , 0.5, 1.5, 0. ],
[ 0. , 0. , -1. , 0.5],
[ 0. , 0. , 0. , 1. ]])
"""
import numpy as np
if variable_order is None:
# just use the existing variable labels, assuming that they are [0, N)
num_variables = len(self)
mat = np.zeros((num_variables, num_variables), dtype=float)
try:
for v, bias in iteritems(self.binary.linear):
mat[v, v] = bias
except IndexError:
raise ValueError(("if 'variable_order' is not provided, binary quadratic model must be "
"index labeled [0, ..., N-1]"))
for (u, v), bias in iteritems(self.binary.quadratic):
if u < v:
mat[u, v] = bias
else:
mat[v, u] = bias
else:
num_variables = len(variable_order)
idx = {v: i for i, v in enumerate(variable_order)}
mat = np.zeros((num_variables, num_variables), dtype=float)
try:
for v, bias in iteritems(self.binary.linear):
mat[idx[v], idx[v]] = bias
except KeyError as e:
raise ValueError(("variable {} is missing from variable_order".format(e)))
for (u, v), bias in iteritems(self.binary.quadratic):
iu, iv = idx[u], idx[v]
if iu < iv:
mat[iu, iv] = bias
else:
mat[iv, iu] = bias
return mat | python | def to_numpy_matrix(self, variable_order=None):
"""Convert a binary quadratic model to NumPy 2D array.
Args:
variable_order (list, optional):
If provided, indexes the rows/columns of the NumPy array. If `variable_order` includes
any variables not in the binary quadratic model, these are added to the NumPy array.
Returns:
:class:`numpy.ndarray`: The binary quadratic model as a NumPy 2D array. Note that the
binary quadratic model is converted to :class:`~.Vartype.BINARY` vartype.
Notes:
The matrix representation of a binary quadratic model only makes sense for binary models.
For a binary sample x, the energy of the model is given by:
.. math::
E(x) = x^T Q x
The offset is dropped when converting to a NumPy array.
Examples:
This example converts a binary quadratic model to NumPy array format while
ordering variables and adding one ('d').
>>> import dimod
>>> import numpy as np
...
>>> model = dimod.BinaryQuadraticModel({'a': 1, 'b': -1, 'c': .5},
... {('a', 'b'): .5, ('b', 'c'): 1.5},
... 1.4,
... dimod.BINARY)
>>> model.to_numpy_matrix(variable_order=['d', 'c', 'b', 'a'])
array([[ 0. , 0. , 0. , 0. ],
[ 0. , 0.5, 1.5, 0. ],
[ 0. , 0. , -1. , 0.5],
[ 0. , 0. , 0. , 1. ]])
"""
import numpy as np
if variable_order is None:
# just use the existing variable labels, assuming that they are [0, N)
num_variables = len(self)
mat = np.zeros((num_variables, num_variables), dtype=float)
try:
for v, bias in iteritems(self.binary.linear):
mat[v, v] = bias
except IndexError:
raise ValueError(("if 'variable_order' is not provided, binary quadratic model must be "
"index labeled [0, ..., N-1]"))
for (u, v), bias in iteritems(self.binary.quadratic):
if u < v:
mat[u, v] = bias
else:
mat[v, u] = bias
else:
num_variables = len(variable_order)
idx = {v: i for i, v in enumerate(variable_order)}
mat = np.zeros((num_variables, num_variables), dtype=float)
try:
for v, bias in iteritems(self.binary.linear):
mat[idx[v], idx[v]] = bias
except KeyError as e:
raise ValueError(("variable {} is missing from variable_order".format(e)))
for (u, v), bias in iteritems(self.binary.quadratic):
iu, iv = idx[u], idx[v]
if iu < iv:
mat[iu, iv] = bias
else:
mat[iv, iu] = bias
return mat | [
"def",
"to_numpy_matrix",
"(",
"self",
",",
"variable_order",
"=",
"None",
")",
":",
"import",
"numpy",
"as",
"np",
"if",
"variable_order",
"is",
"None",
":",
"# just use the existing variable labels, assuming that they are [0, N)",
"num_variables",
"=",
"len",
"(",
"... | Convert a binary quadratic model to NumPy 2D array.
Args:
variable_order (list, optional):
If provided, indexes the rows/columns of the NumPy array. If `variable_order` includes
any variables not in the binary quadratic model, these are added to the NumPy array.
Returns:
:class:`numpy.ndarray`: The binary quadratic model as a NumPy 2D array. Note that the
binary quadratic model is converted to :class:`~.Vartype.BINARY` vartype.
Notes:
The matrix representation of a binary quadratic model only makes sense for binary models.
For a binary sample x, the energy of the model is given by:
.. math::
E(x) = x^T Q x
The offset is dropped when converting to a NumPy array.
Examples:
This example converts a binary quadratic model to NumPy array format while
ordering variables and adding one ('d').
>>> import dimod
>>> import numpy as np
...
>>> model = dimod.BinaryQuadraticModel({'a': 1, 'b': -1, 'c': .5},
... {('a', 'b'): .5, ('b', 'c'): 1.5},
... 1.4,
... dimod.BINARY)
>>> model.to_numpy_matrix(variable_order=['d', 'c', 'b', 'a'])
array([[ 0. , 0. , 0. , 0. ],
[ 0. , 0.5, 1.5, 0. ],
[ 0. , 0. , -1. , 0.5],
[ 0. , 0. , 0. , 1. ]]) | [
"Convert",
"a",
"binary",
"quadratic",
"model",
"to",
"NumPy",
"2D",
"array",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L2096-L2175 | train | 212,608 |
dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.from_numpy_matrix | def from_numpy_matrix(cls, mat, variable_order=None, offset=0.0, interactions=None):
"""Create a binary quadratic model from a NumPy array.
Args:
mat (:class:`numpy.ndarray`):
Coefficients of a quadratic unconstrained binary optimization (QUBO)
model formatted as a square NumPy 2D array.
variable_order (list, optional):
If provided, labels the QUBO variables; otherwise, row/column indices are used.
If `variable_order` is longer than the array, extra values are ignored.
offset (optional, default=0.0):
Constant offset for the binary quadratic model.
interactions (iterable, optional, default=[]):
Any additional 0.0-bias interactions to be added to the binary quadratic model.
Returns:
:class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to
:class:`.Vartype.BINARY`.
Examples:
This example creates a binary quadratic model from a QUBO in NumPy format while
adding an interaction with a new variable ('f'), ignoring an extra variable
('g'), and setting an offset.
>>> import dimod
>>> import numpy as np
>>> Q = np.array([[1, 0, 0, 10, 11],
... [0, 2, 0, 12, 13],
... [0, 0, 3, 14, 15],
... [0, 0, 0, 4, 0],
... [0, 0, 0, 0, 5]]).astype(np.float32)
>>> model = dimod.BinaryQuadraticModel.from_numpy_matrix(Q,
... variable_order = ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
... offset = 2.5,
... interactions = {('a', 'f')})
>>> model.linear # doctest: +SKIP
{'a': 1.0, 'b': 2.0, 'c': 3.0, 'd': 4.0, 'e': 5.0, 'f': 0.0}
>>> model.quadratic[('a', 'd')]
10.0
>>> model.quadratic[('a', 'f')]
0.0
>>> model.offset
2.5
"""
import numpy as np
if mat.ndim != 2:
raise ValueError("expected input mat to be a square 2D numpy array")
num_row, num_col = mat.shape
if num_col != num_row:
raise ValueError("expected input mat to be a square 2D numpy array")
if variable_order is None:
variable_order = list(range(num_row))
if interactions is None:
interactions = []
bqm = cls({}, {}, offset, Vartype.BINARY)
for (row, col), bias in np.ndenumerate(mat):
if row == col:
bqm.add_variable(variable_order[row], bias)
elif bias:
bqm.add_interaction(variable_order[row], variable_order[col], bias)
for u, v in interactions:
bqm.add_interaction(u, v, 0.0)
return bqm | python | def from_numpy_matrix(cls, mat, variable_order=None, offset=0.0, interactions=None):
"""Create a binary quadratic model from a NumPy array.
Args:
mat (:class:`numpy.ndarray`):
Coefficients of a quadratic unconstrained binary optimization (QUBO)
model formatted as a square NumPy 2D array.
variable_order (list, optional):
If provided, labels the QUBO variables; otherwise, row/column indices are used.
If `variable_order` is longer than the array, extra values are ignored.
offset (optional, default=0.0):
Constant offset for the binary quadratic model.
interactions (iterable, optional, default=[]):
Any additional 0.0-bias interactions to be added to the binary quadratic model.
Returns:
:class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to
:class:`.Vartype.BINARY`.
Examples:
This example creates a binary quadratic model from a QUBO in NumPy format while
adding an interaction with a new variable ('f'), ignoring an extra variable
('g'), and setting an offset.
>>> import dimod
>>> import numpy as np
>>> Q = np.array([[1, 0, 0, 10, 11],
... [0, 2, 0, 12, 13],
... [0, 0, 3, 14, 15],
... [0, 0, 0, 4, 0],
... [0, 0, 0, 0, 5]]).astype(np.float32)
>>> model = dimod.BinaryQuadraticModel.from_numpy_matrix(Q,
... variable_order = ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
... offset = 2.5,
... interactions = {('a', 'f')})
>>> model.linear # doctest: +SKIP
{'a': 1.0, 'b': 2.0, 'c': 3.0, 'd': 4.0, 'e': 5.0, 'f': 0.0}
>>> model.quadratic[('a', 'd')]
10.0
>>> model.quadratic[('a', 'f')]
0.0
>>> model.offset
2.5
"""
import numpy as np
if mat.ndim != 2:
raise ValueError("expected input mat to be a square 2D numpy array")
num_row, num_col = mat.shape
if num_col != num_row:
raise ValueError("expected input mat to be a square 2D numpy array")
if variable_order is None:
variable_order = list(range(num_row))
if interactions is None:
interactions = []
bqm = cls({}, {}, offset, Vartype.BINARY)
for (row, col), bias in np.ndenumerate(mat):
if row == col:
bqm.add_variable(variable_order[row], bias)
elif bias:
bqm.add_interaction(variable_order[row], variable_order[col], bias)
for u, v in interactions:
bqm.add_interaction(u, v, 0.0)
return bqm | [
"def",
"from_numpy_matrix",
"(",
"cls",
",",
"mat",
",",
"variable_order",
"=",
"None",
",",
"offset",
"=",
"0.0",
",",
"interactions",
"=",
"None",
")",
":",
"import",
"numpy",
"as",
"np",
"if",
"mat",
".",
"ndim",
"!=",
"2",
":",
"raise",
"ValueError... | Create a binary quadratic model from a NumPy array.
Args:
mat (:class:`numpy.ndarray`):
Coefficients of a quadratic unconstrained binary optimization (QUBO)
model formatted as a square NumPy 2D array.
variable_order (list, optional):
If provided, labels the QUBO variables; otherwise, row/column indices are used.
If `variable_order` is longer than the array, extra values are ignored.
offset (optional, default=0.0):
Constant offset for the binary quadratic model.
interactions (iterable, optional, default=[]):
Any additional 0.0-bias interactions to be added to the binary quadratic model.
Returns:
:class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to
:class:`.Vartype.BINARY`.
Examples:
This example creates a binary quadratic model from a QUBO in NumPy format while
adding an interaction with a new variable ('f'), ignoring an extra variable
('g'), and setting an offset.
>>> import dimod
>>> import numpy as np
>>> Q = np.array([[1, 0, 0, 10, 11],
... [0, 2, 0, 12, 13],
... [0, 0, 3, 14, 15],
... [0, 0, 0, 4, 0],
... [0, 0, 0, 0, 5]]).astype(np.float32)
>>> model = dimod.BinaryQuadraticModel.from_numpy_matrix(Q,
... variable_order = ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
... offset = 2.5,
... interactions = {('a', 'f')})
>>> model.linear # doctest: +SKIP
{'a': 1.0, 'b': 2.0, 'c': 3.0, 'd': 4.0, 'e': 5.0, 'f': 0.0}
>>> model.quadratic[('a', 'd')]
10.0
>>> model.quadratic[('a', 'f')]
0.0
>>> model.offset
2.5 | [
"Create",
"a",
"binary",
"quadratic",
"model",
"from",
"a",
"NumPy",
"array",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L2178-L2253 | train | 212,609 |
dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.to_numpy_vectors | def to_numpy_vectors(self, variable_order=None, dtype=np.float, index_dtype=np.int64, sort_indices=False):
"""Convert a binary quadratic model to numpy arrays.
Args:
variable_order (iterable, optional):
If provided, labels the variables; otherwise, row/column indices are used.
dtype (:class:`numpy.dtype`, optional):
Data-type of the biases. By default, the data-type is inferred from the biases.
index_dtype (:class:`numpy.dtype`, optional):
Data-type of the indices. By default, the data-type is inferred from the labels.
sort_indices (bool, optional, default=False):
If True, the indices are sorted, first by row then by column. Otherwise they
match :attr:`~.BinaryQuadraticModel.quadratic`.
Returns:
:obj:`~numpy.ndarray`: A numpy array of the linear biases.
tuple: The quadratic biases in COOrdinate format.
:obj:`~numpy.ndarray`: A numpy array of the row indices of the quadratic matrix
entries
:obj:`~numpy.ndarray`: A numpy array of the column indices of the quadratic matrix
entries
:obj:`~numpy.ndarray`: A numpy array of the values of the quadratic matrix
entries
The offset
Examples:
>>> bqm = dimod.BinaryQuadraticModel({}, {(0, 1): .5, (3, 2): -1, (0, 3): 1.5}, 0.0, dimod.SPIN)
>>> lin, (i, j, vals), off = bqm.to_numpy_vectors(sort_indices=True)
>>> lin
array([0., 0., 0., 0.])
>>> i
array([0, 0, 2])
>>> j
array([1, 3, 3])
>>> vals
array([ 0.5, 1.5, -1. ])
"""
linear = self.linear
quadratic = self.quadratic
num_variables = len(linear)
num_interactions = len(quadratic)
irow = np.empty(num_interactions, dtype=index_dtype)
icol = np.empty(num_interactions, dtype=index_dtype)
qdata = np.empty(num_interactions, dtype=dtype)
if variable_order is None:
try:
ldata = np.fromiter((linear[v] for v in range(num_variables)), count=num_variables, dtype=dtype)
except KeyError:
raise ValueError(("if 'variable_order' is not provided, binary quadratic model must be "
"index labeled [0, ..., N-1]"))
# we could speed this up a lot with cython
for idx, ((u, v), bias) in enumerate(quadratic.items()):
irow[idx] = u
icol[idx] = v
qdata[idx] = bias
else:
try:
ldata = np.fromiter((linear[v] for v in variable_order), count=num_variables, dtype=dtype)
except KeyError:
raise ValueError("provided 'variable_order' does not match binary quadratic model")
label_to_idx = {v: idx for idx, v in enumerate(variable_order)}
# we could speed this up a lot with cython
for idx, ((u, v), bias) in enumerate(quadratic.items()):
irow[idx] = label_to_idx[u]
icol[idx] = label_to_idx[v]
qdata[idx] = bias
if sort_indices:
# row index should be less than col index, this handles upper-triangular vs lower-triangular
swaps = irow > icol
if swaps.any():
# in-place
irow[swaps], icol[swaps] = icol[swaps], irow[swaps]
# sort lexigraphically
order = np.lexsort((irow, icol))
if not (order == range(len(order))).all():
# copy
irow = irow[order]
icol = icol[order]
qdata = qdata[order]
return ldata, (irow, icol, qdata), ldata.dtype.type(self.offset) | python | def to_numpy_vectors(self, variable_order=None, dtype=np.float, index_dtype=np.int64, sort_indices=False):
"""Convert a binary quadratic model to numpy arrays.
Args:
variable_order (iterable, optional):
If provided, labels the variables; otherwise, row/column indices are used.
dtype (:class:`numpy.dtype`, optional):
Data-type of the biases. By default, the data-type is inferred from the biases.
index_dtype (:class:`numpy.dtype`, optional):
Data-type of the indices. By default, the data-type is inferred from the labels.
sort_indices (bool, optional, default=False):
If True, the indices are sorted, first by row then by column. Otherwise they
match :attr:`~.BinaryQuadraticModel.quadratic`.
Returns:
:obj:`~numpy.ndarray`: A numpy array of the linear biases.
tuple: The quadratic biases in COOrdinate format.
:obj:`~numpy.ndarray`: A numpy array of the row indices of the quadratic matrix
entries
:obj:`~numpy.ndarray`: A numpy array of the column indices of the quadratic matrix
entries
:obj:`~numpy.ndarray`: A numpy array of the values of the quadratic matrix
entries
The offset
Examples:
>>> bqm = dimod.BinaryQuadraticModel({}, {(0, 1): .5, (3, 2): -1, (0, 3): 1.5}, 0.0, dimod.SPIN)
>>> lin, (i, j, vals), off = bqm.to_numpy_vectors(sort_indices=True)
>>> lin
array([0., 0., 0., 0.])
>>> i
array([0, 0, 2])
>>> j
array([1, 3, 3])
>>> vals
array([ 0.5, 1.5, -1. ])
"""
linear = self.linear
quadratic = self.quadratic
num_variables = len(linear)
num_interactions = len(quadratic)
irow = np.empty(num_interactions, dtype=index_dtype)
icol = np.empty(num_interactions, dtype=index_dtype)
qdata = np.empty(num_interactions, dtype=dtype)
if variable_order is None:
try:
ldata = np.fromiter((linear[v] for v in range(num_variables)), count=num_variables, dtype=dtype)
except KeyError:
raise ValueError(("if 'variable_order' is not provided, binary quadratic model must be "
"index labeled [0, ..., N-1]"))
# we could speed this up a lot with cython
for idx, ((u, v), bias) in enumerate(quadratic.items()):
irow[idx] = u
icol[idx] = v
qdata[idx] = bias
else:
try:
ldata = np.fromiter((linear[v] for v in variable_order), count=num_variables, dtype=dtype)
except KeyError:
raise ValueError("provided 'variable_order' does not match binary quadratic model")
label_to_idx = {v: idx for idx, v in enumerate(variable_order)}
# we could speed this up a lot with cython
for idx, ((u, v), bias) in enumerate(quadratic.items()):
irow[idx] = label_to_idx[u]
icol[idx] = label_to_idx[v]
qdata[idx] = bias
if sort_indices:
# row index should be less than col index, this handles upper-triangular vs lower-triangular
swaps = irow > icol
if swaps.any():
# in-place
irow[swaps], icol[swaps] = icol[swaps], irow[swaps]
# sort lexigraphically
order = np.lexsort((irow, icol))
if not (order == range(len(order))).all():
# copy
irow = irow[order]
icol = icol[order]
qdata = qdata[order]
return ldata, (irow, icol, qdata), ldata.dtype.type(self.offset) | [
"def",
"to_numpy_vectors",
"(",
"self",
",",
"variable_order",
"=",
"None",
",",
"dtype",
"=",
"np",
".",
"float",
",",
"index_dtype",
"=",
"np",
".",
"int64",
",",
"sort_indices",
"=",
"False",
")",
":",
"linear",
"=",
"self",
".",
"linear",
"quadratic"... | Convert a binary quadratic model to numpy arrays.
Args:
variable_order (iterable, optional):
If provided, labels the variables; otherwise, row/column indices are used.
dtype (:class:`numpy.dtype`, optional):
Data-type of the biases. By default, the data-type is inferred from the biases.
index_dtype (:class:`numpy.dtype`, optional):
Data-type of the indices. By default, the data-type is inferred from the labels.
sort_indices (bool, optional, default=False):
If True, the indices are sorted, first by row then by column. Otherwise they
match :attr:`~.BinaryQuadraticModel.quadratic`.
Returns:
:obj:`~numpy.ndarray`: A numpy array of the linear biases.
tuple: The quadratic biases in COOrdinate format.
:obj:`~numpy.ndarray`: A numpy array of the row indices of the quadratic matrix
entries
:obj:`~numpy.ndarray`: A numpy array of the column indices of the quadratic matrix
entries
:obj:`~numpy.ndarray`: A numpy array of the values of the quadratic matrix
entries
The offset
Examples:
>>> bqm = dimod.BinaryQuadraticModel({}, {(0, 1): .5, (3, 2): -1, (0, 3): 1.5}, 0.0, dimod.SPIN)
>>> lin, (i, j, vals), off = bqm.to_numpy_vectors(sort_indices=True)
>>> lin
array([0., 0., 0., 0.])
>>> i
array([0, 0, 2])
>>> j
array([1, 3, 3])
>>> vals
array([ 0.5, 1.5, -1. ]) | [
"Convert",
"a",
"binary",
"quadratic",
"model",
"to",
"numpy",
"arrays",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L2255-L2353 | train | 212,610 |
dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.from_numpy_vectors | def from_numpy_vectors(cls, linear, quadratic, offset, vartype, variable_order=None):
"""Create a binary quadratic model from vectors.
Args:
linear (array_like):
A 1D array-like iterable of linear biases.
quadratic (tuple[array_like, array_like, array_like]):
A 3-tuple of 1D array_like vectors of the form (row, col, bias).
offset (numeric, optional):
Constant offset for the binary quadratic model.
vartype (:class:`.Vartype`/str/set):
Variable type for the binary quadratic model. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
variable_order (iterable, optional):
If provided, labels the variables; otherwise, indices are used.
Returns:
:obj:`.BinaryQuadraticModel`
Examples:
>>> import dimod
>>> import numpy as np
...
>>> linear_vector = np.asarray([-1, 1])
>>> quadratic_vectors = (np.asarray([0]), np.asarray([1]), np.asarray([-1.0]))
>>> bqm = dimod.BinaryQuadraticModel.from_numpy_vectors(linear_vector, quadratic_vectors, 0.0, dimod.SPIN)
>>> print(bqm.quadratic)
{(0, 1): -1.0}
"""
try:
heads, tails, values = quadratic
except ValueError:
raise ValueError("quadratic should be a 3-tuple")
if not len(heads) == len(tails) == len(values):
raise ValueError("row, col, and bias should be of equal length")
if variable_order is None:
variable_order = list(range(len(linear)))
linear = {v: float(bias) for v, bias in zip(variable_order, linear)}
quadratic = {(variable_order[u], variable_order[v]): float(bias)
for u, v, bias in zip(heads, tails, values)}
return cls(linear, quadratic, offset, vartype) | python | def from_numpy_vectors(cls, linear, quadratic, offset, vartype, variable_order=None):
"""Create a binary quadratic model from vectors.
Args:
linear (array_like):
A 1D array-like iterable of linear biases.
quadratic (tuple[array_like, array_like, array_like]):
A 3-tuple of 1D array_like vectors of the form (row, col, bias).
offset (numeric, optional):
Constant offset for the binary quadratic model.
vartype (:class:`.Vartype`/str/set):
Variable type for the binary quadratic model. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
variable_order (iterable, optional):
If provided, labels the variables; otherwise, indices are used.
Returns:
:obj:`.BinaryQuadraticModel`
Examples:
>>> import dimod
>>> import numpy as np
...
>>> linear_vector = np.asarray([-1, 1])
>>> quadratic_vectors = (np.asarray([0]), np.asarray([1]), np.asarray([-1.0]))
>>> bqm = dimod.BinaryQuadraticModel.from_numpy_vectors(linear_vector, quadratic_vectors, 0.0, dimod.SPIN)
>>> print(bqm.quadratic)
{(0, 1): -1.0}
"""
try:
heads, tails, values = quadratic
except ValueError:
raise ValueError("quadratic should be a 3-tuple")
if not len(heads) == len(tails) == len(values):
raise ValueError("row, col, and bias should be of equal length")
if variable_order is None:
variable_order = list(range(len(linear)))
linear = {v: float(bias) for v, bias in zip(variable_order, linear)}
quadratic = {(variable_order[u], variable_order[v]): float(bias)
for u, v, bias in zip(heads, tails, values)}
return cls(linear, quadratic, offset, vartype) | [
"def",
"from_numpy_vectors",
"(",
"cls",
",",
"linear",
",",
"quadratic",
",",
"offset",
",",
"vartype",
",",
"variable_order",
"=",
"None",
")",
":",
"try",
":",
"heads",
",",
"tails",
",",
"values",
"=",
"quadratic",
"except",
"ValueError",
":",
"raise",... | Create a binary quadratic model from vectors.
Args:
linear (array_like):
A 1D array-like iterable of linear biases.
quadratic (tuple[array_like, array_like, array_like]):
A 3-tuple of 1D array_like vectors of the form (row, col, bias).
offset (numeric, optional):
Constant offset for the binary quadratic model.
vartype (:class:`.Vartype`/str/set):
Variable type for the binary quadratic model. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
variable_order (iterable, optional):
If provided, labels the variables; otherwise, indices are used.
Returns:
:obj:`.BinaryQuadraticModel`
Examples:
>>> import dimod
>>> import numpy as np
...
>>> linear_vector = np.asarray([-1, 1])
>>> quadratic_vectors = (np.asarray([0]), np.asarray([1]), np.asarray([-1.0]))
>>> bqm = dimod.BinaryQuadraticModel.from_numpy_vectors(linear_vector, quadratic_vectors, 0.0, dimod.SPIN)
>>> print(bqm.quadratic)
{(0, 1): -1.0} | [
"Create",
"a",
"binary",
"quadratic",
"model",
"from",
"vectors",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L2356-L2408 | train | 212,611 |
dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.to_pandas_dataframe | def to_pandas_dataframe(self):
"""Convert a binary quadratic model to pandas DataFrame format.
Returns:
:class:`pandas.DataFrame`: The binary quadratic model as a DataFrame. The DataFrame has
binary vartype. The rows and columns are labeled by the variables in the binary quadratic
model.
Notes:
The DataFrame representation of a binary quadratic model only makes sense for binary models.
For a binary sample x, the energy of the model is given by:
.. math::
E(x) = x^T Q x
The offset is dropped when converting to a pandas DataFrame.
Examples:
This example converts a binary quadratic model to pandas DataFrame format.
>>> import dimod
>>> model = dimod.BinaryQuadraticModel({'a': 1.1, 'b': -1., 'c': .5},
... {('a', 'b'): .5, ('b', 'c'): 1.5},
... 1.4,
... dimod.BINARY)
>>> model.to_pandas_dataframe() # doctest: +SKIP
a b c
a 1.1 0.5 0.0
b 0.0 -1.0 1.5
c 0.0 0.0 0.5
"""
import pandas as pd
try:
variable_order = sorted(self.linear)
except TypeError:
variable_order = list(self.linear)
return pd.DataFrame(self.to_numpy_matrix(variable_order=variable_order),
index=variable_order,
columns=variable_order) | python | def to_pandas_dataframe(self):
"""Convert a binary quadratic model to pandas DataFrame format.
Returns:
:class:`pandas.DataFrame`: The binary quadratic model as a DataFrame. The DataFrame has
binary vartype. The rows and columns are labeled by the variables in the binary quadratic
model.
Notes:
The DataFrame representation of a binary quadratic model only makes sense for binary models.
For a binary sample x, the energy of the model is given by:
.. math::
E(x) = x^T Q x
The offset is dropped when converting to a pandas DataFrame.
Examples:
This example converts a binary quadratic model to pandas DataFrame format.
>>> import dimod
>>> model = dimod.BinaryQuadraticModel({'a': 1.1, 'b': -1., 'c': .5},
... {('a', 'b'): .5, ('b', 'c'): 1.5},
... 1.4,
... dimod.BINARY)
>>> model.to_pandas_dataframe() # doctest: +SKIP
a b c
a 1.1 0.5 0.0
b 0.0 -1.0 1.5
c 0.0 0.0 0.5
"""
import pandas as pd
try:
variable_order = sorted(self.linear)
except TypeError:
variable_order = list(self.linear)
return pd.DataFrame(self.to_numpy_matrix(variable_order=variable_order),
index=variable_order,
columns=variable_order) | [
"def",
"to_pandas_dataframe",
"(",
"self",
")",
":",
"import",
"pandas",
"as",
"pd",
"try",
":",
"variable_order",
"=",
"sorted",
"(",
"self",
".",
"linear",
")",
"except",
"TypeError",
":",
"variable_order",
"=",
"list",
"(",
"self",
".",
"linear",
")",
... | Convert a binary quadratic model to pandas DataFrame format.
Returns:
:class:`pandas.DataFrame`: The binary quadratic model as a DataFrame. The DataFrame has
binary vartype. The rows and columns are labeled by the variables in the binary quadratic
model.
Notes:
The DataFrame representation of a binary quadratic model only makes sense for binary models.
For a binary sample x, the energy of the model is given by:
.. math::
E(x) = x^T Q x
The offset is dropped when converting to a pandas DataFrame.
Examples:
This example converts a binary quadratic model to pandas DataFrame format.
>>> import dimod
>>> model = dimod.BinaryQuadraticModel({'a': 1.1, 'b': -1., 'c': .5},
... {('a', 'b'): .5, ('b', 'c'): 1.5},
... 1.4,
... dimod.BINARY)
>>> model.to_pandas_dataframe() # doctest: +SKIP
a b c
a 1.1 0.5 0.0
b 0.0 -1.0 1.5
c 0.0 0.0 0.5 | [
"Convert",
"a",
"binary",
"quadratic",
"model",
"to",
"pandas",
"DataFrame",
"format",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L2410-L2453 | train | 212,612 |
def from_pandas_dataframe(cls, bqm_df, offset=0.0, interactions=None):
    """Create a binary quadratic model from a QUBO model formatted as a pandas DataFrame.

    Args:
        bqm_df (:class:`pandas.DataFrame`):
            Quadratic unconstrained binary optimization (QUBO) model formatted
            as a pandas DataFrame. Row and column indices label the QUBO
            variables; values are QUBO coefficients.

        offset (optional, default=0.0):
            Constant offset for the binary quadratic model.

        interactions (iterable, optional, default=[]):
            Any additional 0.0-bias interactions to be added to the binary
            quadratic model.

    Returns:
        :class:`.BinaryQuadraticModel`: Binary quadratic model with vartype
        set to :class:`vartype.BINARY`.

    Examples:
        >>> import dimod
        >>> import pandas as pd
        >>> pd_qubo = pd.DataFrame(data={0: [-1, 0], 1: [2, -1]})
        >>> model = dimod.BinaryQuadraticModel.from_pandas_dataframe(pd_qubo,
        ...                     offset = 2.5,
        ...                     interactions = {(0,2), (1,2)})
        >>> model.offset
        2.5

    """
    if interactions is None:
        interactions = []

    bqm = cls({}, {}, offset, Vartype.BINARY)

    for u, row in bqm_df.iterrows():
        # Series.iteritems() was removed in pandas 2.0; Series.items() is
        # the long-supported equivalent and behaves identically here.
        for v, bias in row.items():
            if u == v:
                # diagonal entries are linear biases
                bqm.add_variable(u, bias)
            elif bias:
                # off-diagonal nonzero entries are quadratic biases
                bqm.add_interaction(u, v, bias)

    for u, v in interactions:
        bqm.add_interaction(u, v, 0.0)

    return bqm
"""Create a binary quadratic model from a QUBO model formatted as a pandas DataFrame.
Args:
bqm_df (:class:`pandas.DataFrame`):
Quadratic unconstrained binary optimization (QUBO) model formatted
as a pandas DataFrame. Row and column indices label the QUBO variables;
values are QUBO coefficients.
offset (optional, default=0.0):
Constant offset for the binary quadratic model.
interactions (iterable, optional, default=[]):
Any additional 0.0-bias interactions to be added to the binary quadratic model.
Returns:
:class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to
:class:`vartype.BINARY`.
Examples:
This example creates a binary quadratic model from a QUBO in pandas DataFrame format
while adding an interaction and setting a constant offset.
>>> import dimod
>>> import pandas as pd
>>> pd_qubo = pd.DataFrame(data={0: [-1, 0], 1: [2, -1]})
>>> pd_qubo
0 1
0 -1 2
1 0 -1
>>> model = dimod.BinaryQuadraticModel.from_pandas_dataframe(pd_qubo,
... offset = 2.5,
... interactions = {(0,2), (1,2)})
>>> model.linear # doctest: +SKIP
{0: -1, 1: -1.0, 2: 0.0}
>>> model.quadratic # doctest: +SKIP
{(0, 1): 2, (0, 2): 0.0, (1, 2): 0.0}
>>> model.offset
2.5
>>> model.vartype
<Vartype.BINARY: frozenset({0, 1})>
"""
if interactions is None:
interactions = []
bqm = cls({}, {}, offset, Vartype.BINARY)
for u, row in bqm_df.iterrows():
for v, bias in row.iteritems():
if u == v:
bqm.add_variable(u, bias)
elif bias:
bqm.add_interaction(u, v, bias)
for u, v in interactions:
bqm.add_interaction(u, v, 0.0)
return bqm | [
"def",
"from_pandas_dataframe",
"(",
"cls",
",",
"bqm_df",
",",
"offset",
"=",
"0.0",
",",
"interactions",
"=",
"None",
")",
":",
"if",
"interactions",
"is",
"None",
":",
"interactions",
"=",
"[",
"]",
"bqm",
"=",
"cls",
"(",
"{",
"}",
",",
"{",
"}",... | Create a binary quadratic model from a QUBO model formatted as a pandas DataFrame.
Args:
bqm_df (:class:`pandas.DataFrame`):
Quadratic unconstrained binary optimization (QUBO) model formatted
as a pandas DataFrame. Row and column indices label the QUBO variables;
values are QUBO coefficients.
offset (optional, default=0.0):
Constant offset for the binary quadratic model.
interactions (iterable, optional, default=[]):
Any additional 0.0-bias interactions to be added to the binary quadratic model.
Returns:
:class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to
:class:`vartype.BINARY`.
Examples:
This example creates a binary quadratic model from a QUBO in pandas DataFrame format
while adding an interaction and setting a constant offset.
>>> import dimod
>>> import pandas as pd
>>> pd_qubo = pd.DataFrame(data={0: [-1, 0], 1: [2, -1]})
>>> pd_qubo
0 1
0 -1 2
1 0 -1
>>> model = dimod.BinaryQuadraticModel.from_pandas_dataframe(pd_qubo,
... offset = 2.5,
... interactions = {(0,2), (1,2)})
>>> model.linear # doctest: +SKIP
{0: -1, 1: -1.0, 2: 0.0}
>>> model.quadratic # doctest: +SKIP
{(0, 1): 2, (0, 2): 0.0, (1, 2): 0.0}
>>> model.offset
2.5
>>> model.vartype
<Vartype.BINARY: frozenset({0, 1})> | [
"Create",
"a",
"binary",
"quadratic",
"model",
"from",
"a",
"QUBO",
"model",
"formatted",
"as",
"a",
"pandas",
"DataFrame",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L2456-L2514 | train | 212,613 |
def ising_energy(sample, h, J, offset=0.0):
    """Calculate the energy for the specified sample of an Ising model.

    The energy is the constant offset plus the linear contributions
    ``h_v * s_v`` and the quadratic contributions ``J_{u,v} * s_u * s_v``:

    .. math::

        E(\\mathbf{s}) = \\sum_v h_v s_v + \\sum_{u,v} J_{u,v} s_u s_v + c

    Args:
        sample (dict[variable, spin]):
            Sample as a dict of form {v: spin, ...}, where keys are variables
            of the model and values are spins (either -1 or 1).

        h (dict[variable, bias]):
            Linear biases as a dict of the form {v: bias, ...}.

        J (dict[(variable, variable), bias]):
            Quadratic biases as a dict of the form {(u, v): bias, ...}.

        offset (numeric, optional, default=0):
            Constant offset to be applied to the energy. Default 0.

    Returns:
        float: The induced energy.

    Notes:
        No input checking is performed.

    Examples:
        >>> import dimod
        >>> dimod.ising_energy({1: -1, 2: -1}, {1: 1, 2: 1}, {(1, 2): 1}, 0.5)
        -0.5

    References
    ----------
    `Ising model on Wikipedia <https://en.wikipedia.org/wiki/Ising_model>`_

    """
    energy = offset

    # linear contribution
    energy += sum(bias * sample[v] for v, bias in h.items())

    # quadratic contribution
    energy += sum(bias * sample[u] * sample[v] for (u, v), bias in J.items())

    return energy
"""Calculate the energy for the specified sample of an Ising model.
Energy of a sample for a binary quadratic model is defined as a sum, offset
by the constant energy offset associated with the model, of
the sample multipled by the linear bias of the variable and
all its interactions. For an Ising model,
.. math::
E(\mathbf{s}) = \sum_v h_v s_v + \sum_{u,v} J_{u,v} s_u s_v + c
where :math:`s_v` is the sample, :math:`h_v` is the linear bias, :math:`J_{u,v}`
the quadratic bias (interactions), and :math:`c` the energy offset.
Args:
sample (dict[variable, spin]):
Sample for a binary quadratic model as a dict of form {v: spin, ...},
where keys are variables of the model and values are spins (either -1 or 1).
h (dict[variable, bias]):
Linear biases as a dict of the form {v: bias, ...}, where keys are variables of
the model and values are biases.
J (dict[(variable, variable), bias]):
Quadratic biases as a dict of the form {(u, v): bias, ...}, where keys
are 2-tuples of variables of the model and values are quadratic biases
associated with the pair of variables (the interaction).
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
float: The induced energy.
Notes:
No input checking is performed.
Examples:
This example calculates the energy of a sample representing two down spins for
an Ising model of two variables that have positive biases of value 1 and
are positively coupled with an interaction of value 1.
>>> import dimod
>>> sample = {1: -1, 2: -1}
>>> h = {1: 1, 2: 1}
>>> J = {(1, 2): 1}
>>> dimod.ising_energy(sample, h, J, 0.5)
-0.5
References
----------
`Ising model on Wikipedia <https://en.wikipedia.org/wiki/Ising_model>`_
"""
# add the contribution from the linear biases
for v in h:
offset += h[v] * sample[v]
# add the contribution from the quadratic biases
for v0, v1 in J:
offset += J[(v0, v1)] * sample[v0] * sample[v1]
return offset | [
"def",
"ising_energy",
"(",
"sample",
",",
"h",
",",
"J",
",",
"offset",
"=",
"0.0",
")",
":",
"# add the contribution from the linear biases",
"for",
"v",
"in",
"h",
":",
"offset",
"+=",
"h",
"[",
"v",
"]",
"*",
"sample",
"[",
"v",
"]",
"# add the contr... | Calculate the energy for the specified sample of an Ising model.
Energy of a sample for a binary quadratic model is defined as a sum, offset
by the constant energy offset associated with the model, of
the sample multipled by the linear bias of the variable and
all its interactions. For an Ising model,
.. math::
E(\mathbf{s}) = \sum_v h_v s_v + \sum_{u,v} J_{u,v} s_u s_v + c
where :math:`s_v` is the sample, :math:`h_v` is the linear bias, :math:`J_{u,v}`
the quadratic bias (interactions), and :math:`c` the energy offset.
Args:
sample (dict[variable, spin]):
Sample for a binary quadratic model as a dict of form {v: spin, ...},
where keys are variables of the model and values are spins (either -1 or 1).
h (dict[variable, bias]):
Linear biases as a dict of the form {v: bias, ...}, where keys are variables of
the model and values are biases.
J (dict[(variable, variable), bias]):
Quadratic biases as a dict of the form {(u, v): bias, ...}, where keys
are 2-tuples of variables of the model and values are quadratic biases
associated with the pair of variables (the interaction).
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
float: The induced energy.
Notes:
No input checking is performed.
Examples:
This example calculates the energy of a sample representing two down spins for
an Ising model of two variables that have positive biases of value 1 and
are positively coupled with an interaction of value 1.
>>> import dimod
>>> sample = {1: -1, 2: -1}
>>> h = {1: 1, 2: 1}
>>> J = {(1, 2): 1}
>>> dimod.ising_energy(sample, h, J, 0.5)
-0.5
References
----------
`Ising model on Wikipedia <https://en.wikipedia.org/wiki/Ising_model>`_ | [
"Calculate",
"the",
"energy",
"for",
"the",
"specified",
"sample",
"of",
"an",
"Ising",
"model",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/utilities.py#L26-L87 | train | 212,614 |
def qubo_energy(sample, Q, offset=0.0):
    """Calculate the energy for the specified sample of a QUBO model.

    The energy is the constant offset plus the coefficient-weighted products
    of the sample values:

    .. math::

        E(\\mathbf{x}) = \\sum_{u,v} Q_{u,v} x_u x_v + c

    Tuples (u, v) with u != v represent interactions and (v, v) linear biases.

    Args:
        sample (dict[variable, spin]):
            Sample as a dict of form {v: bin, ...}, where keys are variables
            of the model and values are binary (either 0 or 1).

        Q (dict[(variable, variable), coefficient]):
            QUBO coefficients in a dict of form {(u, v): coefficient, ...}.

        offset (numeric, optional, default=0):
            Constant offset to be applied to the energy. Default 0.

    Returns:
        float: The induced energy.

    Notes:
        No input checking is performed.

    Examples:
        >>> import dimod
        >>> dimod.qubo_energy({1: 0, 2: 0}, {(1, 1): 1, (2, 2): 1, (1, 2): 1}, 0.5)
        0.5

    References
    ----------
    `QUBO model on Wikipedia <https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization>`_

    """
    return offset + sum(coeff * sample[u] * sample[v]
                        for (u, v), coeff in Q.items())
"""Calculate the energy for the specified sample of a QUBO model.
Energy of a sample for a binary quadratic model is defined as a sum, offset
by the constant energy offset associated with the model, of
the sample multipled by the linear bias of the variable and
all its interactions. For a quadratic unconstrained binary optimization (QUBO)
model,
.. math::
E(\mathbf{x}) = \sum_{u,v} Q_{u,v} x_u x_v + c
where :math:`x_v` is the sample, :math:`Q_{u,v}`
a matrix of biases, and :math:`c` the energy offset.
Args:
sample (dict[variable, spin]):
Sample for a binary quadratic model as a dict of form {v: bin, ...},
where keys are variables of the model and values are binary (either 0 or 1).
Q (dict[(variable, variable), coefficient]):
QUBO coefficients in a dict of form {(u, v): coefficient, ...}, where keys
are 2-tuples of variables of the model and values are biases
associated with the pair of variables. Tuples (u, v) represent interactions
and (v, v) linear biases.
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
float: The induced energy.
Notes:
No input checking is performed.
Examples:
This example calculates the energy of a sample representing two zeros for
a QUBO model of two variables that have positive biases of value 1 and
are positively coupled with an interaction of value 1.
>>> import dimod
>>> sample = {1: 0, 2: 0}
>>> Q = {(1, 1): 1, (2, 2): 1, (1, 2): 1}
>>> dimod.qubo_energy(sample, Q, 0.5)
0.5
References
----------
`QUBO model on Wikipedia <https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization>`_
"""
for v0, v1 in Q:
offset += sample[v0] * sample[v1] * Q[(v0, v1)]
return offset | [
"def",
"qubo_energy",
"(",
"sample",
",",
"Q",
",",
"offset",
"=",
"0.0",
")",
":",
"for",
"v0",
",",
"v1",
"in",
"Q",
":",
"offset",
"+=",
"sample",
"[",
"v0",
"]",
"*",
"sample",
"[",
"v1",
"]",
"*",
"Q",
"[",
"(",
"v0",
",",
"v1",
")",
"... | Calculate the energy for the specified sample of a QUBO model.
Energy of a sample for a binary quadratic model is defined as a sum, offset
by the constant energy offset associated with the model, of
the sample multipled by the linear bias of the variable and
all its interactions. For a quadratic unconstrained binary optimization (QUBO)
model,
.. math::
E(\mathbf{x}) = \sum_{u,v} Q_{u,v} x_u x_v + c
where :math:`x_v` is the sample, :math:`Q_{u,v}`
a matrix of biases, and :math:`c` the energy offset.
Args:
sample (dict[variable, spin]):
Sample for a binary quadratic model as a dict of form {v: bin, ...},
where keys are variables of the model and values are binary (either 0 or 1).
Q (dict[(variable, variable), coefficient]):
QUBO coefficients in a dict of form {(u, v): coefficient, ...}, where keys
are 2-tuples of variables of the model and values are biases
associated with the pair of variables. Tuples (u, v) represent interactions
and (v, v) linear biases.
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
float: The induced energy.
Notes:
No input checking is performed.
Examples:
This example calculates the energy of a sample representing two zeros for
a QUBO model of two variables that have positive biases of value 1 and
are positively coupled with an interaction of value 1.
>>> import dimod
>>> sample = {1: 0, 2: 0}
>>> Q = {(1, 1): 1, (2, 2): 1, (1, 2): 1}
>>> dimod.qubo_energy(sample, Q, 0.5)
0.5
References
----------
`QUBO model on Wikipedia <https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization>`_ | [
"Calculate",
"the",
"energy",
"for",
"the",
"specified",
"sample",
"of",
"a",
"QUBO",
"model",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/utilities.py#L90-L144 | train | 212,615 |
def ising_to_qubo(h, J, offset=0.0):
    """Convert an Ising problem to a QUBO problem.

    Map an Ising model defined on spins (variables with {-1, +1} values) to a
    quadratic unconstrained binary optimization (QUBO) formulation
    :math:`x' Q x` defined over binary variables (0 or 1 values), where the
    linear term is contained along the diagonal of Q. Return matrix Q that
    defines the model as well as the offset in energy between the two problem
    formulations:

    .. math::

        s' J s + h' s = offset + x' Q x

    See :meth:`~dimod.utilities.qubo_to_ising` for the inverse function.

    Args:
        h (dict[variable, bias]):
            Linear biases as a dict of the form {v: bias, ...}.

        J (dict[(variable, variable), bias]):
            Quadratic biases as a dict of the form {(u, v): bias, ...}.

        offset (numeric, optional, default=0):
            Constant offset to be applied to the energy. Default 0.

    Returns:
        (dict, float): A 2-tuple containing:

            dict: QUBO coefficients.

            float: New energy offset.

    Examples:
        >>> import dimod
        >>> dimod.ising_to_qubo({1: 1, 2: 1}, {(1, 2): 1}, 0.5)  # doctest: +SKIP
        ({(1, 1): 0.0, (1, 2): 4.0, (2, 2): 0.0}, -0.5)

    """
    # Change of variables s = 2x - 1: each linear bias contributes 2*h to the
    # diagonal and -h to the offset; each quadratic bias contributes 4*J
    # off-diagonal, -2*J to each incident diagonal entry and +J to the offset.
    q = {(v, v): 2. * bias for v, bias in h.items()}

    for (u, v), bias in J.items():
        if bias == 0.0:
            continue
        q[(u, v)] = 4. * bias

        # A variable may appear in J without appearing in h, so the diagonal
        # entry may not exist yet; the original in-place -= raised KeyError
        # in that case.
        q[(u, u)] = q.get((u, u), 0.) - 2. * bias
        q[(v, v)] = q.get((v, v), 0.) - 2. * bias

    offset += sum(J.values()) - sum(h.values())

    return q, offset
"""Convert an Ising problem to a QUBO problem.
Map an Ising model defined on spins (variables with {-1, +1} values) to quadratic
unconstrained binary optimization (QUBO) formulation :math:`x' Q x` defined over
binary variables (0 or 1 values), where the linear term is contained along the diagonal of Q.
Return matrix Q that defines the model as well as the offset in energy between the two
problem formulations:
.. math::
s' J s + h' s = offset + x' Q x
See :meth:`~dimod.utilities.qubo_to_ising` for the inverse function.
Args:
h (dict[variable, bias]):
Linear biases as a dict of the form {v: bias, ...}, where keys are variables of
the model and values are biases.
J (dict[(variable, variable), bias]):
Quadratic biases as a dict of the form {(u, v): bias, ...}, where keys
are 2-tuples of variables of the model and values are quadratic biases
associated with the pair of variables (the interaction).
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
(dict, float): A 2-tuple containing:
dict: QUBO coefficients.
float: New energy offset.
Examples:
This example converts an Ising problem of two variables that have positive
biases of value 1 and are positively coupled with an interaction of value 1
to a QUBO problem.
>>> import dimod
>>> h = {1: 1, 2: 1}
>>> J = {(1, 2): 1}
>>> dimod.ising_to_qubo(h, J, 0.5) # doctest: +SKIP
({(1, 1): 0.0, (1, 2): 4.0, (2, 2): 0.0}, -0.5)
"""
# the linear biases are the easiest
q = {(v, v): 2. * bias for v, bias in iteritems(h)}
# next the quadratic biases
for (u, v), bias in iteritems(J):
if bias == 0.0:
continue
q[(u, v)] = 4. * bias
q[(u, u)] -= 2. * bias
q[(v, v)] -= 2. * bias
# finally calculate the offset
offset += sum(itervalues(J)) - sum(itervalues(h))
return q, offset | [
"def",
"ising_to_qubo",
"(",
"h",
",",
"J",
",",
"offset",
"=",
"0.0",
")",
":",
"# the linear biases are the easiest",
"q",
"=",
"{",
"(",
"v",
",",
"v",
")",
":",
"2.",
"*",
"bias",
"for",
"v",
",",
"bias",
"in",
"iteritems",
"(",
"h",
")",
"}",
... | Convert an Ising problem to a QUBO problem.
Map an Ising model defined on spins (variables with {-1, +1} values) to quadratic
unconstrained binary optimization (QUBO) formulation :math:`x' Q x` defined over
binary variables (0 or 1 values), where the linear term is contained along the diagonal of Q.
Return matrix Q that defines the model as well as the offset in energy between the two
problem formulations:
.. math::
s' J s + h' s = offset + x' Q x
See :meth:`~dimod.utilities.qubo_to_ising` for the inverse function.
Args:
h (dict[variable, bias]):
Linear biases as a dict of the form {v: bias, ...}, where keys are variables of
the model and values are biases.
J (dict[(variable, variable), bias]):
Quadratic biases as a dict of the form {(u, v): bias, ...}, where keys
are 2-tuples of variables of the model and values are quadratic biases
associated with the pair of variables (the interaction).
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
(dict, float): A 2-tuple containing:
dict: QUBO coefficients.
float: New energy offset.
Examples:
This example converts an Ising problem of two variables that have positive
biases of value 1 and are positively coupled with an interaction of value 1
to a QUBO problem.
>>> import dimod
>>> h = {1: 1, 2: 1}
>>> J = {(1, 2): 1}
>>> dimod.ising_to_qubo(h, J, 0.5) # doctest: +SKIP
({(1, 1): 0.0, (1, 2): 4.0, (2, 2): 0.0}, -0.5) | [
"Convert",
"an",
"Ising",
"problem",
"to",
"a",
"QUBO",
"problem",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/utilities.py#L147-L206 | train | 212,616 |
def qubo_to_ising(Q, offset=0.0):
    """Convert a QUBO problem to an Ising problem.

    Map a quadratic unconstrained binary optimization (QUBO) problem
    :math:`x' Q x` defined over binary variables (0 or 1 values), where the
    linear term is contained along the diagonal of Q, to an Ising model
    defined on spins (variables with {-1, +1} values). Return h and J that
    define the Ising model as well as the offset in energy between the two
    problem formulations:

    .. math::

        x' Q x = offset + s' J s + h' s

    See :meth:`~dimod.utilities.ising_to_qubo` for the inverse function.

    Args:
        Q (dict[(variable, variable), coefficient]):
            QUBO coefficients in a dict of form {(u, v): coefficient, ...}.
            Tuples (u, v) represent interactions and (v, v) linear biases.

        offset (numeric, optional, default=0):
            Constant offset to be applied to the energy. Default 0.

    Returns:
        (dict, dict, float): A 3-tuple containing:

            dict: Linear coefficients of the Ising problem.

            dict: Quadratic coefficients of the Ising problem.

            float: New energy offset.

    Examples:
        >>> import dimod
        >>> dimod.qubo_to_ising({(1, 1): 1, (2, 2): 1, (1, 2): 1}, 0.5)  # doctest: +SKIP
        ({1: 0.75, 2: 0.75}, {(1, 2): 0.25}, 1.75)

    """
    h = {}
    J = {}
    linear_offset = 0.0
    quadratic_offset = 0.0

    for (u, v), bias in Q.items():
        if u == v:
            # diagonal: linear bias, s = 2x - 1 gives h += bias/2
            h[u] = h.get(u, 0.0) + .5 * bias
            linear_offset += bias
        else:
            # off-diagonal: only nonzero interactions are kept in J, but the
            # incident variables always get (possibly zero) linear entries
            if bias != 0.0:
                J[(u, v)] = .25 * bias

            h[u] = h.get(u, 0.0) + .25 * bias
            h[v] = h.get(v, 0.0) + .25 * bias

            quadratic_offset += bias

    offset += .5 * linear_offset + .25 * quadratic_offset

    return h, J, offset
"""Convert a QUBO problem to an Ising problem.
Map a quadratic unconstrained binary optimization (QUBO) problem :math:`x' Q x`
defined over binary variables (0 or 1 values), where the linear term is contained along
the diagonal of Q, to an Ising model defined on spins (variables with {-1, +1} values).
Return h and J that define the Ising model as well as the offset in energy
between the two problem formulations:
.. math::
x' Q x = offset + s' J s + h' s
See :meth:`~dimod.utilities.ising_to_qubo` for the inverse function.
Args:
Q (dict[(variable, variable), coefficient]):
QUBO coefficients in a dict of form {(u, v): coefficient, ...}, where keys
are 2-tuples of variables of the model and values are biases
associated with the pair of variables. Tuples (u, v) represent interactions
and (v, v) linear biases.
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
(dict, dict, float): A 3-tuple containing:
dict: Linear coefficients of the Ising problem.
dict: Quadratic coefficients of the Ising problem.
float: New energy offset.
Examples:
This example converts a QUBO problem of two variables that have positive
biases of value 1 and are positively coupled with an interaction of value 1
to an Ising problem.
>>> import dimod
>>> Q = {(1, 1): 1, (2, 2): 1, (1, 2): 1}
>>> dimod.qubo_to_ising(Q, 0.5) # doctest: +SKIP
({1: 0.75, 2: 0.75}, {(1, 2): 0.25}, 1.75)
"""
h = {}
J = {}
linear_offset = 0.0
quadratic_offset = 0.0
for (u, v), bias in iteritems(Q):
if u == v:
if u in h:
h[u] += .5 * bias
else:
h[u] = .5 * bias
linear_offset += bias
else:
if bias != 0.0:
J[(u, v)] = .25 * bias
if u in h:
h[u] += .25 * bias
else:
h[u] = .25 * bias
if v in h:
h[v] += .25 * bias
else:
h[v] = .25 * bias
quadratic_offset += bias
offset += .5 * linear_offset + .25 * quadratic_offset
return h, J, offset | [
"def",
"qubo_to_ising",
"(",
"Q",
",",
"offset",
"=",
"0.0",
")",
":",
"h",
"=",
"{",
"}",
"J",
"=",
"{",
"}",
"linear_offset",
"=",
"0.0",
"quadratic_offset",
"=",
"0.0",
"for",
"(",
"u",
",",
"v",
")",
",",
"bias",
"in",
"iteritems",
"(",
"Q",
... | Convert a QUBO problem to an Ising problem.
Map a quadratic unconstrained binary optimization (QUBO) problem :math:`x' Q x`
defined over binary variables (0 or 1 values), where the linear term is contained along
the diagonal of Q, to an Ising model defined on spins (variables with {-1, +1} values).
Return h and J that define the Ising model as well as the offset in energy
between the two problem formulations:
.. math::
x' Q x = offset + s' J s + h' s
See :meth:`~dimod.utilities.ising_to_qubo` for the inverse function.
Args:
Q (dict[(variable, variable), coefficient]):
QUBO coefficients in a dict of form {(u, v): coefficient, ...}, where keys
are 2-tuples of variables of the model and values are biases
associated with the pair of variables. Tuples (u, v) represent interactions
and (v, v) linear biases.
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
(dict, dict, float): A 3-tuple containing:
dict: Linear coefficients of the Ising problem.
dict: Quadratic coefficients of the Ising problem.
float: New energy offset.
Examples:
This example converts a QUBO problem of two variables that have positive
biases of value 1 and are positively coupled with an interaction of value 1
to an Ising problem.
>>> import dimod
>>> Q = {(1, 1): 1, (2, 2): 1, (1, 2): 1}
>>> dimod.qubo_to_ising(Q, 0.5) # doctest: +SKIP
({1: 0.75, 2: 0.75}, {(1, 2): 0.25}, 1.75) | [
"Convert",
"a",
"QUBO",
"problem",
"to",
"an",
"Ising",
"problem",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/utilities.py#L209-L284 | train | 212,617 |
def resolve_label_conflict(mapping, old_labels=None, new_labels=None):
    """Resolve a self-labeling conflict by creating an intermediate labeling.

    Args:
        mapping (dict):
            A dict mapping the current variable labels to new ones.

        old_labels (set, optional, default=None):
            The keys of mapping. Can be passed in for performance reasons.
            These are not checked.

        new_labels (set, optional, default=None):
            The values of mapping. Can be passed in for performance reasons.
            These are not checked.

    Returns:
        tuple: A 2-tuple containing:

            dict: A map from the keys of mapping to an intermediate labeling

            dict: A map from the intermediate labeling to the values of
            mapping.

    """
    if old_labels is None:
        old_labels = set(mapping)
    if new_labels is None:
        new_labels = set(mapping.values())

    # Generator for fresh intermediate labels; starting high is a cheap
    # optimization because variables are commonly labeled 0..n-1 already.
    label_source = itertools.count(2 * len(mapping))

    old_to_intermediate = {}
    intermediate_to_new = {}

    for old, new in mapping.items():
        if old == new:
            # self-labels need no relabeling at all
            continue

        if old not in new_labels and new not in old_labels:
            # no collision: relabel directly; the second pass treats this as
            # a self-label so nothing is added to intermediate_to_new
            old_to_intermediate[old] = new
            continue

        # collision: route through a fresh label unused by either side
        intermediate = next(label_source)
        while intermediate in new_labels or intermediate in old_labels:
            intermediate = next(label_source)

        old_to_intermediate[old] = intermediate
        intermediate_to_new[intermediate] = new

    return old_to_intermediate, intermediate_to_new
"""Resolve a self-labeling conflict by creating an intermediate labeling.
Args:
mapping (dict):
A dict mapping the current variable labels to new ones.
old_labels (set, optional, default=None):
The keys of mapping. Can be passed in for performance reasons. These are not checked.
new_labels (set, optional, default=None):
The values of mapping. Can be passed in for performance reasons. These are not checked.
Returns:
tuple: A 2-tuple containing:
dict: A map from the keys of mapping to an intermediate labeling
dict: A map from the intermediate labeling to the values of mapping.
"""
if old_labels is None:
old_labels = set(mapping)
if new_labels is None:
new_labels = set(itervalues(mapping))
# counter will be used to generate the intermediate labels, as an easy optimization
# we start the counter with a high number because often variables are labeled by
# integers starting from 0
counter = itertools.count(2 * len(mapping))
old_to_intermediate = {}
intermediate_to_new = {}
for old, new in iteritems(mapping):
if old == new:
# we can remove self-labels
continue
if old in new_labels or new in old_labels:
# try to get a new unique label
lbl = next(counter)
while lbl in new_labels or lbl in old_labels:
lbl = next(counter)
# add it to the mapping
old_to_intermediate[old] = lbl
intermediate_to_new[lbl] = new
else:
old_to_intermediate[old] = new
# don't need to add it to intermediate_to_new because it is a self-label
return old_to_intermediate, intermediate_to_new | [
"def",
"resolve_label_conflict",
"(",
"mapping",
",",
"old_labels",
"=",
"None",
",",
"new_labels",
"=",
"None",
")",
":",
"if",
"old_labels",
"is",
"None",
":",
"old_labels",
"=",
"set",
"(",
"mapping",
")",
"if",
"new_labels",
"is",
"None",
":",
"new_lab... | Resolve a self-labeling conflict by creating an intermediate labeling.
Args:
mapping (dict):
A dict mapping the current variable labels to new ones.
old_labels (set, optional, default=None):
The keys of mapping. Can be passed in for performance reasons. These are not checked.
new_labels (set, optional, default=None):
The values of mapping. Can be passed in for performance reasons. These are not checked.
Returns:
tuple: A 2-tuple containing:
dict: A map from the keys of mapping to an intermediate labeling
dict: A map from the intermediate labeling to the values of mapping. | [
"Resolve",
"a",
"self",
"-",
"labeling",
"conflict",
"by",
"creating",
"an",
"intermediate",
"labeling",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/utilities.py#L287-L342 | train | 212,618 |
dwavesystems/dimod | dimod/roof_duality/fix_variables.py | fix_variables | def fix_variables(bqm, sampling_mode=True):
"""Determine assignments for some variables of a binary quadratic model.
Roof duality finds a lower bound for the minimum of a quadratic polynomial. It
can also find minimizing assignments for some of the polynomial's variables;
these fixed variables take the same values in all optimal solutions [BHT]_ [BH]_.
A quadratic pseudo-Boolean function can be represented as a network to find
the lower bound through network-flow computations. `fix_variables` uses maximum
flow in the implication network to correctly fix variables. Consequently, you can
find an assignment for the remaining variables that attains the optimal value.
Args:
bqm (:obj:`.BinaryQuadraticModel`)
A binary quadratic model.
sampling_mode (bool, optional, default=True):
In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly
connected components are used to fix more variables, but in some optimal solutions
these variables may take different values.
Returns:
dict: Variable assignments for some variables of the specified binary quadratic model.
Examples:
This example creates a binary quadratic model with a single ground state
and fixes the model's single variable to the minimizing assignment.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_variable('a', 1.0)
>>> dimod.fix_variables(bqm)
{'a': -1}
This example has two ground states, :math:`a=b=-1` and :math:`a=b=1`, with
no variable having a single value for all ground states, so neither variable
is fixed.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm) # doctest: +SKIP
{}
This example turns sampling model off, so variables are fixed to an assignment
that attains the ground state.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm, sampling_mode=False) # doctest: +SKIP
{'a': 1, 'b': 1}
.. [BHT] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstraint Quadratic Binary
Optimization. Rutcor Research Report 10-2006, April, 2006.
.. [BH] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied Mathematics 123,
(2002), pp. 155-225
"""
try:
from dimod.roof_duality._fix_variables import fix_variables_wrapper
except ImportError:
raise ImportError("c++ extension roof_duality is not built")
if sampling_mode:
method = 2 # roof-duality only
else:
method = 1 # roof-duality and strongly connected components
linear = bqm.linear
if all(v in linear for v in range(len(bqm))):
# we can work with the binary form of the bqm directly
fixed = fix_variables_wrapper(bqm.binary, method)
else:
try:
inverse_mapping = dict(enumerate(sorted(linear)))
except TypeError:
# in python3 unlike types cannot be sorted
inverse_mapping = dict(enumerate(linear))
mapping = {v: i for i, v in inverse_mapping.items()}
fixed = fix_variables_wrapper(bqm.relabel_variables(mapping, inplace=False).binary, method)
fixed = {inverse_mapping[v]: val for v, val in fixed.items()}
if bqm.vartype is Vartype.SPIN:
return {v: 2*val - 1 for v, val in fixed.items()}
else:
return fixed | python | def fix_variables(bqm, sampling_mode=True):
"""Determine assignments for some variables of a binary quadratic model.
Roof duality finds a lower bound for the minimum of a quadratic polynomial. It
can also find minimizing assignments for some of the polynomial's variables;
these fixed variables take the same values in all optimal solutions [BHT]_ [BH]_.
A quadratic pseudo-Boolean function can be represented as a network to find
the lower bound through network-flow computations. `fix_variables` uses maximum
flow in the implication network to correctly fix variables. Consequently, you can
find an assignment for the remaining variables that attains the optimal value.
Args:
bqm (:obj:`.BinaryQuadraticModel`)
A binary quadratic model.
sampling_mode (bool, optional, default=True):
In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly
connected components are used to fix more variables, but in some optimal solutions
these variables may take different values.
Returns:
dict: Variable assignments for some variables of the specified binary quadratic model.
Examples:
This example creates a binary quadratic model with a single ground state
and fixes the model's single variable to the minimizing assignment.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_variable('a', 1.0)
>>> dimod.fix_variables(bqm)
{'a': -1}
This example has two ground states, :math:`a=b=-1` and :math:`a=b=1`, with
no variable having a single value for all ground states, so neither variable
is fixed.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm) # doctest: +SKIP
{}
This example turns sampling model off, so variables are fixed to an assignment
that attains the ground state.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm, sampling_mode=False) # doctest: +SKIP
{'a': 1, 'b': 1}
.. [BHT] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstraint Quadratic Binary
Optimization. Rutcor Research Report 10-2006, April, 2006.
.. [BH] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied Mathematics 123,
(2002), pp. 155-225
"""
try:
from dimod.roof_duality._fix_variables import fix_variables_wrapper
except ImportError:
raise ImportError("c++ extension roof_duality is not built")
if sampling_mode:
method = 2 # roof-duality only
else:
method = 1 # roof-duality and strongly connected components
linear = bqm.linear
if all(v in linear for v in range(len(bqm))):
# we can work with the binary form of the bqm directly
fixed = fix_variables_wrapper(bqm.binary, method)
else:
try:
inverse_mapping = dict(enumerate(sorted(linear)))
except TypeError:
# in python3 unlike types cannot be sorted
inverse_mapping = dict(enumerate(linear))
mapping = {v: i for i, v in inverse_mapping.items()}
fixed = fix_variables_wrapper(bqm.relabel_variables(mapping, inplace=False).binary, method)
fixed = {inverse_mapping[v]: val for v, val in fixed.items()}
if bqm.vartype is Vartype.SPIN:
return {v: 2*val - 1 for v, val in fixed.items()}
else:
return fixed | [
"def",
"fix_variables",
"(",
"bqm",
",",
"sampling_mode",
"=",
"True",
")",
":",
"try",
":",
"from",
"dimod",
".",
"roof_duality",
".",
"_fix_variables",
"import",
"fix_variables_wrapper",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"c++ extension... | Determine assignments for some variables of a binary quadratic model.
Roof duality finds a lower bound for the minimum of a quadratic polynomial. It
can also find minimizing assignments for some of the polynomial's variables;
these fixed variables take the same values in all optimal solutions [BHT]_ [BH]_.
A quadratic pseudo-Boolean function can be represented as a network to find
the lower bound through network-flow computations. `fix_variables` uses maximum
flow in the implication network to correctly fix variables. Consequently, you can
find an assignment for the remaining variables that attains the optimal value.
Args:
bqm (:obj:`.BinaryQuadraticModel`)
A binary quadratic model.
sampling_mode (bool, optional, default=True):
In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly
connected components are used to fix more variables, but in some optimal solutions
these variables may take different values.
Returns:
dict: Variable assignments for some variables of the specified binary quadratic model.
Examples:
This example creates a binary quadratic model with a single ground state
and fixes the model's single variable to the minimizing assignment.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_variable('a', 1.0)
>>> dimod.fix_variables(bqm)
{'a': -1}
This example has two ground states, :math:`a=b=-1` and :math:`a=b=1`, with
no variable having a single value for all ground states, so neither variable
is fixed.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm) # doctest: +SKIP
{}
This example turns sampling model off, so variables are fixed to an assignment
that attains the ground state.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm, sampling_mode=False) # doctest: +SKIP
{'a': 1, 'b': 1}
.. [BHT] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstraint Quadratic Binary
Optimization. Rutcor Research Report 10-2006, April, 2006.
.. [BH] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied Mathematics 123,
(2002), pp. 155-225 | [
"Determine",
"assignments",
"for",
"some",
"variables",
"of",
"a",
"binary",
"quadratic",
"model",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/roof_duality/fix_variables.py#L19-L103 | train | 212,619 |
dwavesystems/dimod | dimod/serialization/json.py | dimod_object_hook | def dimod_object_hook(obj):
"""JSON-decoding for dimod objects.
See Also:
:class:`json.JSONDecoder` for using custom decoders.
"""
if _is_sampleset_v2(obj):
# in the future we could handle subtypes but right now we just have the
# one
return SampleSet.from_serializable(obj)
elif _is_bqm_v2(obj):
# in the future we could handle subtypes but right now we just have the
# one
return BinaryQuadraticModel.from_serializable(obj)
return obj | python | def dimod_object_hook(obj):
"""JSON-decoding for dimod objects.
See Also:
:class:`json.JSONDecoder` for using custom decoders.
"""
if _is_sampleset_v2(obj):
# in the future we could handle subtypes but right now we just have the
# one
return SampleSet.from_serializable(obj)
elif _is_bqm_v2(obj):
# in the future we could handle subtypes but right now we just have the
# one
return BinaryQuadraticModel.from_serializable(obj)
return obj | [
"def",
"dimod_object_hook",
"(",
"obj",
")",
":",
"if",
"_is_sampleset_v2",
"(",
"obj",
")",
":",
"# in the future we could handle subtypes but right now we just have the",
"# one",
"return",
"SampleSet",
".",
"from_serializable",
"(",
"obj",
")",
"elif",
"_is_bqm_v2",
... | JSON-decoding for dimod objects.
See Also:
:class:`json.JSONDecoder` for using custom decoders. | [
"JSON",
"-",
"decoding",
"for",
"dimod",
"objects",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/serialization/json.py#L95-L110 | train | 212,620 |
dwavesystems/dimod | dimod/serialization/json.py | _decode_label | def _decode_label(label):
"""Convert a list label into a tuple. Works recursively on nested lists."""
if isinstance(label, list):
return tuple(_decode_label(v) for v in label)
return label | python | def _decode_label(label):
"""Convert a list label into a tuple. Works recursively on nested lists."""
if isinstance(label, list):
return tuple(_decode_label(v) for v in label)
return label | [
"def",
"_decode_label",
"(",
"label",
")",
":",
"if",
"isinstance",
"(",
"label",
",",
"list",
")",
":",
"return",
"tuple",
"(",
"_decode_label",
"(",
"v",
")",
"for",
"v",
"in",
"label",
")",
"return",
"label"
] | Convert a list label into a tuple. Works recursively on nested lists. | [
"Convert",
"a",
"list",
"label",
"into",
"a",
"tuple",
".",
"Works",
"recursively",
"on",
"nested",
"lists",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/serialization/json.py#L250-L254 | train | 212,621 |
dwavesystems/dimod | dimod/serialization/json.py | _encode_label | def _encode_label(label):
"""Convert a tuple label into a list. Works recursively on nested tuples."""
if isinstance(label, tuple):
return [_encode_label(v) for v in label]
return label | python | def _encode_label(label):
"""Convert a tuple label into a list. Works recursively on nested tuples."""
if isinstance(label, tuple):
return [_encode_label(v) for v in label]
return label | [
"def",
"_encode_label",
"(",
"label",
")",
":",
"if",
"isinstance",
"(",
"label",
",",
"tuple",
")",
":",
"return",
"[",
"_encode_label",
"(",
"v",
")",
"for",
"v",
"in",
"label",
"]",
"return",
"label"
] | Convert a tuple label into a list. Works recursively on nested tuples. | [
"Convert",
"a",
"tuple",
"label",
"into",
"a",
"list",
".",
"Works",
"recursively",
"on",
"nested",
"tuples",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/serialization/json.py#L257-L261 | train | 212,622 |
dwavesystems/dimod | dimod/higherorder/utils.py | make_quadratic | def make_quadratic(poly, strength, vartype=None, bqm=None):
"""Create a binary quadratic model from a higher order polynomial.
Args:
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a tuple of
variables and `bias` the associated bias.
strength (float):
Strength of the reduction constraint. Insufficient strength can result in the
binary quadratic model not having the same minimizations as the polynomial.
vartype (:class:`.Vartype`, optional):
Vartype of the polynomial. If `bqm` is provided, vartype is not required.
bqm (:class:`.BinaryQuadraticModel`, optional):
The terms of the reduced polynomial are added to this binary quadratic model.
If not provided, a new binary quadratic model is created.
Returns:
:class:`.BinaryQuadraticModel`
Examples:
>>> poly = {(0,): -1, (1,): 1, (2,): 1.5, (0, 1): -1, (0, 1, 2): -2}
>>> bqm = dimod.make_quadratic(poly, 5.0, dimod.SPIN)
"""
if bqm is None:
if vartype is None:
raise ValueError("one of vartype and bqm must be provided")
bqm = BinaryQuadraticModel.empty(vartype)
else:
if not isinstance(bqm, BinaryQuadraticModel):
raise TypeError('create_using must be a BinaryQuadraticModel')
if vartype is not None and vartype is not bqm.vartype:
raise ValueError("one of vartype and create_using must be provided")
bqm.info['reduction'] = {}
new_poly = {}
for term, bias in iteritems(poly):
if len(term) == 0:
bqm.add_offset(bias)
elif len(term) == 1:
v, = term
bqm.add_variable(v, bias)
else:
new_poly[term] = bias
return _reduce_degree(bqm, new_poly, vartype, strength) | python | def make_quadratic(poly, strength, vartype=None, bqm=None):
"""Create a binary quadratic model from a higher order polynomial.
Args:
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a tuple of
variables and `bias` the associated bias.
strength (float):
Strength of the reduction constraint. Insufficient strength can result in the
binary quadratic model not having the same minimizations as the polynomial.
vartype (:class:`.Vartype`, optional):
Vartype of the polynomial. If `bqm` is provided, vartype is not required.
bqm (:class:`.BinaryQuadraticModel`, optional):
The terms of the reduced polynomial are added to this binary quadratic model.
If not provided, a new binary quadratic model is created.
Returns:
:class:`.BinaryQuadraticModel`
Examples:
>>> poly = {(0,): -1, (1,): 1, (2,): 1.5, (0, 1): -1, (0, 1, 2): -2}
>>> bqm = dimod.make_quadratic(poly, 5.0, dimod.SPIN)
"""
if bqm is None:
if vartype is None:
raise ValueError("one of vartype and bqm must be provided")
bqm = BinaryQuadraticModel.empty(vartype)
else:
if not isinstance(bqm, BinaryQuadraticModel):
raise TypeError('create_using must be a BinaryQuadraticModel')
if vartype is not None and vartype is not bqm.vartype:
raise ValueError("one of vartype and create_using must be provided")
bqm.info['reduction'] = {}
new_poly = {}
for term, bias in iteritems(poly):
if len(term) == 0:
bqm.add_offset(bias)
elif len(term) == 1:
v, = term
bqm.add_variable(v, bias)
else:
new_poly[term] = bias
return _reduce_degree(bqm, new_poly, vartype, strength) | [
"def",
"make_quadratic",
"(",
"poly",
",",
"strength",
",",
"vartype",
"=",
"None",
",",
"bqm",
"=",
"None",
")",
":",
"if",
"bqm",
"is",
"None",
":",
"if",
"vartype",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"one of vartype and bqm must be provided\... | Create a binary quadratic model from a higher order polynomial.
Args:
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a tuple of
variables and `bias` the associated bias.
strength (float):
Strength of the reduction constraint. Insufficient strength can result in the
binary quadratic model not having the same minimizations as the polynomial.
vartype (:class:`.Vartype`, optional):
Vartype of the polynomial. If `bqm` is provided, vartype is not required.
bqm (:class:`.BinaryQuadraticModel`, optional):
The terms of the reduced polynomial are added to this binary quadratic model.
If not provided, a new binary quadratic model is created.
Returns:
:class:`.BinaryQuadraticModel`
Examples:
>>> poly = {(0,): -1, (1,): 1, (2,): 1.5, (0, 1): -1, (0, 1, 2): -2}
>>> bqm = dimod.make_quadratic(poly, 5.0, dimod.SPIN) | [
"Create",
"a",
"binary",
"quadratic",
"model",
"from",
"a",
"higher",
"order",
"polynomial",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/higherorder/utils.py#L86-L136 | train | 212,623 |
dwavesystems/dimod | dimod/higherorder/utils.py | _reduce_degree | def _reduce_degree(bqm, poly, vartype, scale):
"""helper function for make_quadratic"""
if all(len(term) <= 2 for term in poly):
# termination criteria, we are already quadratic
bqm.add_interactions_from(poly)
return bqm
# determine which pair of variables appear most often
paircounter = Counter()
for term in poly:
if len(term) > 2:
for u, v in itertools.combinations(term, 2):
pair = frozenset((u, v))
paircounter[pair] += 1
pair, __ = paircounter.most_common(1)[0]
u, v = pair
# make a new product variable and aux variable and add constraint that u*v == p
p = '{}*{}'.format(u, v)
while p in bqm.linear:
p = '_' + p
if vartype is Vartype.BINARY:
constraint = _binary_product([u, v, p])
bqm.info['reduction'][(u, v)] = {'product': p}
else:
aux = 'aux{},{}'.format(u, v)
while aux in bqm.linear:
aux = '_' + aux
constraint = _spin_product([u, v, p, aux])
bqm.info['reduction'][(u, v)] = {'product': p, 'auxiliary': aux}
constraint.scale(scale)
bqm.update(constraint)
new_poly = {}
for interaction, bias in poly.items():
if u in interaction and v in interaction:
if len(interaction) == 2:
# in this case we are reducing a quadratic bias, so it becomes linear and can
# be removed
assert len(interaction) >= 2
bqm.add_variable(p, bias)
continue
interaction = tuple(s for s in interaction if s not in pair)
interaction += (p,)
if interaction in new_poly:
new_poly[interaction] += bias
else:
new_poly[interaction] = bias
return _reduce_degree(bqm, new_poly, vartype, scale) | python | def _reduce_degree(bqm, poly, vartype, scale):
"""helper function for make_quadratic"""
if all(len(term) <= 2 for term in poly):
# termination criteria, we are already quadratic
bqm.add_interactions_from(poly)
return bqm
# determine which pair of variables appear most often
paircounter = Counter()
for term in poly:
if len(term) > 2:
for u, v in itertools.combinations(term, 2):
pair = frozenset((u, v))
paircounter[pair] += 1
pair, __ = paircounter.most_common(1)[0]
u, v = pair
# make a new product variable and aux variable and add constraint that u*v == p
p = '{}*{}'.format(u, v)
while p in bqm.linear:
p = '_' + p
if vartype is Vartype.BINARY:
constraint = _binary_product([u, v, p])
bqm.info['reduction'][(u, v)] = {'product': p}
else:
aux = 'aux{},{}'.format(u, v)
while aux in bqm.linear:
aux = '_' + aux
constraint = _spin_product([u, v, p, aux])
bqm.info['reduction'][(u, v)] = {'product': p, 'auxiliary': aux}
constraint.scale(scale)
bqm.update(constraint)
new_poly = {}
for interaction, bias in poly.items():
if u in interaction and v in interaction:
if len(interaction) == 2:
# in this case we are reducing a quadratic bias, so it becomes linear and can
# be removed
assert len(interaction) >= 2
bqm.add_variable(p, bias)
continue
interaction = tuple(s for s in interaction if s not in pair)
interaction += (p,)
if interaction in new_poly:
new_poly[interaction] += bias
else:
new_poly[interaction] = bias
return _reduce_degree(bqm, new_poly, vartype, scale) | [
"def",
"_reduce_degree",
"(",
"bqm",
",",
"poly",
",",
"vartype",
",",
"scale",
")",
":",
"if",
"all",
"(",
"len",
"(",
"term",
")",
"<=",
"2",
"for",
"term",
"in",
"poly",
")",
":",
"# termination criteria, we are already quadratic",
"bqm",
".",
"add_inte... | helper function for make_quadratic | [
"helper",
"function",
"for",
"make_quadratic"
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/higherorder/utils.py#L139-L198 | train | 212,624 |
dwavesystems/dimod | dimod/higherorder/utils.py | poly_energy | def poly_energy(sample_like, poly):
"""Calculates energy of a sample from a higher order polynomial.
Args:
sample (samples_like):
A raw sample. `samples_like` is an extension of NumPy's
array_like structure. See :func:`.as_samples`.
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a
tuple of variables and `bias` the associated bias.
Returns:
float: The energy of the sample.
"""
msg = ("poly_energy is deprecated and will be removed in dimod 0.9.0."
"In the future, use BinaryPolynomial.energy")
warnings.warn(msg, DeprecationWarning)
# dev note the vartype is not used in the energy calculation and this will
# be deprecated in the future
return BinaryPolynomial(poly, 'SPIN').energy(sample_like) | python | def poly_energy(sample_like, poly):
"""Calculates energy of a sample from a higher order polynomial.
Args:
sample (samples_like):
A raw sample. `samples_like` is an extension of NumPy's
array_like structure. See :func:`.as_samples`.
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a
tuple of variables and `bias` the associated bias.
Returns:
float: The energy of the sample.
"""
msg = ("poly_energy is deprecated and will be removed in dimod 0.9.0."
"In the future, use BinaryPolynomial.energy")
warnings.warn(msg, DeprecationWarning)
# dev note the vartype is not used in the energy calculation and this will
# be deprecated in the future
return BinaryPolynomial(poly, 'SPIN').energy(sample_like) | [
"def",
"poly_energy",
"(",
"sample_like",
",",
"poly",
")",
":",
"msg",
"=",
"(",
"\"poly_energy is deprecated and will be removed in dimod 0.9.0.\"",
"\"In the future, use BinaryPolynomial.energy\"",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"DeprecationWarning",
")",... | Calculates energy of a sample from a higher order polynomial.
Args:
sample (samples_like):
A raw sample. `samples_like` is an extension of NumPy's
array_like structure. See :func:`.as_samples`.
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a
tuple of variables and `bias` the associated bias.
Returns:
float: The energy of the sample. | [
"Calculates",
"energy",
"of",
"a",
"sample",
"from",
"a",
"higher",
"order",
"polynomial",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/higherorder/utils.py#L201-L223 | train | 212,625 |
dwavesystems/dimod | dimod/higherorder/utils.py | poly_energies | def poly_energies(samples_like, poly):
"""Calculates energy of samples from a higher order polynomial.
Args:
sample (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a
tuple of variables and `bias` the associated bias. Variable
labeling/indexing of terms in poly dict must match that of the
sample(s).
Returns:
list/:obj:`numpy.ndarray`: The energy of the sample(s).
"""
msg = ("poly_energies is deprecated and will be removed in dimod 0.9.0."
"In the future, use BinaryPolynomial.energies")
warnings.warn(msg, DeprecationWarning)
# dev note the vartype is not used in the energy calculation and this will
# be deprecated in the future
return BinaryPolynomial(poly, 'SPIN').energies(samples_like) | python | def poly_energies(samples_like, poly):
"""Calculates energy of samples from a higher order polynomial.
Args:
sample (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a
tuple of variables and `bias` the associated bias. Variable
labeling/indexing of terms in poly dict must match that of the
sample(s).
Returns:
list/:obj:`numpy.ndarray`: The energy of the sample(s).
"""
msg = ("poly_energies is deprecated and will be removed in dimod 0.9.0."
"In the future, use BinaryPolynomial.energies")
warnings.warn(msg, DeprecationWarning)
# dev note the vartype is not used in the energy calculation and this will
# be deprecated in the future
return BinaryPolynomial(poly, 'SPIN').energies(samples_like) | [
"def",
"poly_energies",
"(",
"samples_like",
",",
"poly",
")",
":",
"msg",
"=",
"(",
"\"poly_energies is deprecated and will be removed in dimod 0.9.0.\"",
"\"In the future, use BinaryPolynomial.energies\"",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"DeprecationWarning",... | Calculates energy of samples from a higher order polynomial.
Args:
sample (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a
tuple of variables and `bias` the associated bias. Variable
labeling/indexing of terms in poly dict must match that of the
sample(s).
Returns:
list/:obj:`numpy.ndarray`: The energy of the sample(s). | [
"Calculates",
"energy",
"of",
"samples",
"from",
"a",
"higher",
"order",
"polynomial",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/higherorder/utils.py#L226-L249 | train | 212,626 |
dwavesystems/dimod | dimod/generators/fcl.py | frustrated_loop | def frustrated_loop(graph, num_cycles, R=float('inf'), cycle_predicates=tuple(),
max_failed_cycles=100, seed=None):
"""Generate a frustrated loop problem.
A (generic) frustrated loop (FL) problem is a sum of Hamiltonians, each generated from a single
"good" loop.
1. Generate a loop by random walking on the support graph.
2. If the cycle is "good" (according to provided predicates), continue, else go to 1.
3. Choose one edge of the loop to be anti-ferromagnetic; all other edges are ferromagnetic.
4. Add the loop's coupler values to the FL problem.
If at any time the magnitude of a coupler in the FL problem exceeds a given precision `R`,
remove that coupler from consideration in the loop generation procedure.
This is a generic generator of FL problems that encompasses both the original FL problem
definition from [#HJARTL]_ and the limited FL problem definition from [#KLH]_
Args:
graph (int/tuple[nodes, edges]/:obj:`~networkx.Graph`):
The graph to build the frustrated loops on. Either an integer n, interpreted as a
complete graph of size n, or a nodes/edges pair, or a NetworkX graph.
num_cyles (int):
Desired number of frustrated cycles.
R (int, optional, default=inf):
Maximum interaction weight.
cycle_predicates (tuple[function], optional):
An iterable of functions, which should accept a cycle and return a bool.
max_failed_cycles (int, optional, default=100):
Maximum number of failures to find a cycle before terminating.
seed (int, optional, default=None):
Random seed.
.. [#HJARTL] Hen, I., J. Job, T. Albash, T.F. Rønnow, M. Troyer, D. Lidar. Probing for quantum
speedup in spin glass problems with planted solutions. https://arxiv.org/abs/1502.01663v2
.. [#KLH] King, A.D., T. Lanting, R. Harris. Performance of a quantum annealer on range-limited
constraint satisfaction problems. https://arxiv.org/abs/1502.02098
"""
nodes, edges = graph
if num_cycles <= 0:
raise ValueError("num_cycles should be a positive integer")
if R <= 0:
raise ValueError("R should be a positive integer")
if max_failed_cycles <= 0:
raise ValueError("max_failed_cycles should be a positive integer")
if seed is None:
seed = numpy.random.randint(2**32, dtype=np.uint32)
r = numpy.random.RandomState(seed)
# G = nx.Graph(edges)
# J = collections.defaultdict(int)
adj = {v: set() for v in nodes}
for u, v in edges:
if u in adj:
adj[u].add(v)
else:
adj[u] = {v}
if v in adj:
adj[v].add(u)
else:
adj[v] = {u}
bqm = BinaryQuadraticModel({v: 0.0 for v in nodes}, {edge: 0.0 for edge in edges}, 0.0, SPIN)
failed_cycles = 0
good_cycles = 0
while good_cycles < num_cycles and failed_cycles < max_failed_cycles:
cycle = _random_cycle(adj, r)
# if the cycle failed or it is otherwise invalid, mark as failed and continue
if cycle is None or not all(pred(cycle) for pred in cycle_predicates):
failed_cycles += 1
continue
# If its a good cycle, modify J with it.
good_cycles += 1
cycle_J = {(cycle[i - 1], cycle[i]): -1. for i in range(len(cycle))}
# randomly select an edge and flip it
idx = r.randint(len(cycle))
cycle_J[(cycle[idx - 1], cycle[idx])] *= -1.
# update the bqm
bqm.add_interactions_from(cycle_J)
for u, v in cycle_J:
if abs(bqm.adj[u][v]) >= R:
adj[u].remove(v)
adj[v].remove(u)
if good_cycles < num_cycles:
raise RuntimeError
return bqm | python | def frustrated_loop(graph, num_cycles, R=float('inf'), cycle_predicates=tuple(),
max_failed_cycles=100, seed=None):
"""Generate a frustrated loop problem.
A (generic) frustrated loop (FL) problem is a sum of Hamiltonians, each generated from a single
"good" loop.
1. Generate a loop by random walking on the support graph.
2. If the cycle is "good" (according to provided predicates), continue, else go to 1.
3. Choose one edge of the loop to be anti-ferromagnetic; all other edges are ferromagnetic.
4. Add the loop's coupler values to the FL problem.
If at any time the magnitude of a coupler in the FL problem exceeds a given precision `R`,
remove that coupler from consideration in the loop generation procedure.
This is a generic generator of FL problems that encompasses both the original FL problem
definition from [#HJARTL]_ and the limited FL problem definition from [#KLH]_
Args:
graph (int/tuple[nodes, edges]/:obj:`~networkx.Graph`):
The graph to build the frustrated loops on. Either an integer n, interpreted as a
complete graph of size n, or a nodes/edges pair, or a NetworkX graph.
num_cyles (int):
Desired number of frustrated cycles.
R (int, optional, default=inf):
Maximum interaction weight.
cycle_predicates (tuple[function], optional):
An iterable of functions, which should accept a cycle and return a bool.
max_failed_cycles (int, optional, default=100):
Maximum number of failures to find a cycle before terminating.
seed (int, optional, default=None):
Random seed.
.. [#HJARTL] Hen, I., J. Job, T. Albash, T.F. Rønnow, M. Troyer, D. Lidar. Probing for quantum
speedup in spin glass problems with planted solutions. https://arxiv.org/abs/1502.01663v2
.. [#KLH] King, A.D., T. Lanting, R. Harris. Performance of a quantum annealer on range-limited
constraint satisfaction problems. https://arxiv.org/abs/1502.02098
"""
nodes, edges = graph
if num_cycles <= 0:
raise ValueError("num_cycles should be a positive integer")
if R <= 0:
raise ValueError("R should be a positive integer")
if max_failed_cycles <= 0:
raise ValueError("max_failed_cycles should be a positive integer")
if seed is None:
seed = numpy.random.randint(2**32, dtype=np.uint32)
r = numpy.random.RandomState(seed)
# G = nx.Graph(edges)
# J = collections.defaultdict(int)
adj = {v: set() for v in nodes}
for u, v in edges:
if u in adj:
adj[u].add(v)
else:
adj[u] = {v}
if v in adj:
adj[v].add(u)
else:
adj[v] = {u}
bqm = BinaryQuadraticModel({v: 0.0 for v in nodes}, {edge: 0.0 for edge in edges}, 0.0, SPIN)
failed_cycles = 0
good_cycles = 0
while good_cycles < num_cycles and failed_cycles < max_failed_cycles:
cycle = _random_cycle(adj, r)
# if the cycle failed or it is otherwise invalid, mark as failed and continue
if cycle is None or not all(pred(cycle) for pred in cycle_predicates):
failed_cycles += 1
continue
# If its a good cycle, modify J with it.
good_cycles += 1
cycle_J = {(cycle[i - 1], cycle[i]): -1. for i in range(len(cycle))}
# randomly select an edge and flip it
idx = r.randint(len(cycle))
cycle_J[(cycle[idx - 1], cycle[idx])] *= -1.
# update the bqm
bqm.add_interactions_from(cycle_J)
for u, v in cycle_J:
if abs(bqm.adj[u][v]) >= R:
adj[u].remove(v)
adj[v].remove(u)
if good_cycles < num_cycles:
raise RuntimeError
return bqm | [
"def",
"frustrated_loop",
"(",
"graph",
",",
"num_cycles",
",",
"R",
"=",
"float",
"(",
"'inf'",
")",
",",
"cycle_predicates",
"=",
"tuple",
"(",
")",
",",
"max_failed_cycles",
"=",
"100",
",",
"seed",
"=",
"None",
")",
":",
"nodes",
",",
"edges",
"=",... | Generate a frustrated loop problem.
A (generic) frustrated loop (FL) problem is a sum of Hamiltonians, each generated from a single
"good" loop.
1. Generate a loop by random walking on the support graph.
2. If the cycle is "good" (according to provided predicates), continue, else go to 1.
3. Choose one edge of the loop to be anti-ferromagnetic; all other edges are ferromagnetic.
4. Add the loop's coupler values to the FL problem.
If at any time the magnitude of a coupler in the FL problem exceeds a given precision `R`,
remove that coupler from consideration in the loop generation procedure.
This is a generic generator of FL problems that encompasses both the original FL problem
definition from [#HJARTL]_ and the limited FL problem definition from [#KLH]_
Args:
graph (int/tuple[nodes, edges]/:obj:`~networkx.Graph`):
The graph to build the frustrated loops on. Either an integer n, interpreted as a
complete graph of size n, or a nodes/edges pair, or a NetworkX graph.
num_cyles (int):
Desired number of frustrated cycles.
R (int, optional, default=inf):
Maximum interaction weight.
cycle_predicates (tuple[function], optional):
An iterable of functions, which should accept a cycle and return a bool.
max_failed_cycles (int, optional, default=100):
Maximum number of failures to find a cycle before terminating.
seed (int, optional, default=None):
Random seed.
.. [#HJARTL] Hen, I., J. Job, T. Albash, T.F. Rønnow, M. Troyer, D. Lidar. Probing for quantum
speedup in spin glass problems with planted solutions. https://arxiv.org/abs/1502.01663v2
.. [#KLH] King, A.D., T. Lanting, R. Harris. Performance of a quantum annealer on range-limited
constraint satisfaction problems. https://arxiv.org/abs/1502.02098 | [
"Generate",
"a",
"frustrated",
"loop",
"problem",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/generators/fcl.py#L32-L132 | train | 212,627 |
dwavesystems/dimod | dimod/generators/fcl.py | _random_cycle | def _random_cycle(adj, random_state):
"""Find a cycle using a random graph walk."""
# step through idx values in adj to pick a random one, random.choice does not work on dicts
n = random_state.randint(len(adj))
for idx, v in enumerate(adj):
if idx == n:
break
start = v
walk = [start]
visited = {start: 0}
while True:
if len(walk) > 1:
# as long as we don't step back one we won't have any repeated edges
previous = walk[-2]
neighbors = [u for u in adj[walk[-1]] if u != previous]
else:
neighbors = list(adj[walk[-1]])
if not neighbors:
# we've walked into a dead end
return None
# get a random neighbor
u = random_state.choice(neighbors)
if u in visited:
# if we've seen this neighbour, then we have a cycle starting from it
return walk[visited[u]:]
else:
# add to walk and keep moving
walk.append(u)
visited[u] = len(visited) | python | def _random_cycle(adj, random_state):
"""Find a cycle using a random graph walk."""
# step through idx values in adj to pick a random one, random.choice does not work on dicts
n = random_state.randint(len(adj))
for idx, v in enumerate(adj):
if idx == n:
break
start = v
walk = [start]
visited = {start: 0}
while True:
if len(walk) > 1:
# as long as we don't step back one we won't have any repeated edges
previous = walk[-2]
neighbors = [u for u in adj[walk[-1]] if u != previous]
else:
neighbors = list(adj[walk[-1]])
if not neighbors:
# we've walked into a dead end
return None
# get a random neighbor
u = random_state.choice(neighbors)
if u in visited:
# if we've seen this neighbour, then we have a cycle starting from it
return walk[visited[u]:]
else:
# add to walk and keep moving
walk.append(u)
visited[u] = len(visited) | [
"def",
"_random_cycle",
"(",
"adj",
",",
"random_state",
")",
":",
"# step through idx values in adj to pick a random one, random.choice does not work on dicts",
"n",
"=",
"random_state",
".",
"randint",
"(",
"len",
"(",
"adj",
")",
")",
"for",
"idx",
",",
"v",
"in",
... | Find a cycle using a random graph walk. | [
"Find",
"a",
"cycle",
"using",
"a",
"random",
"graph",
"walk",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/generators/fcl.py#L135-L168 | train | 212,628 |
dwavesystems/dimod | dimod/reference/composites/spin_transform.py | SpinReversalTransformComposite.sample | def sample(self, bqm, num_spin_reversal_transforms=2, spin_reversal_variables=None, **kwargs):
"""Sample from the binary quadratic model.
Args:
bqm (:obj:`~dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
num_spin_reversal_transforms (integer, optional, default=2):
Number of spin reversal transform runs.
spin_reversal_variables (list/dict, optional):
Deprecated and no longer functional.
Returns:
:obj:`.SampleSet`
Examples:
This example runs 100 spin reversals applied to one variable of a QUBO problem.
>>> import dimod
...
>>> base_sampler = dimod.ExactSolver()
>>> composed_sampler = dimod.SpinReversalTransformComposite(base_sampler)
>>> Q = {('a', 'a'): -1, ('b', 'b'): -1, ('a', 'b'): 2}
>>> response = composed_sampler.sample_qubo(Q,
... num_spin_reversal_transforms=100,
... spin_reversal_variables={'a'})
>>> len(response)
400
>>> print(next(response.data())) # doctest: +SKIP
Sample(sample={'a': 0, 'b': 1}, energy=-1.0)
"""
if spin_reversal_variables is not None:
# this kwarg does not actually make sense for multiple SRTs. To
# get the same functionality a user should apply them by hand
# to their BQM before submitting.
import warnings
warnings.warn("'spin_reversal_variables' kwarg is deprecated and no longer functions.",
DeprecationWarning)
# make a main response
responses = []
flipped_bqm = bqm.copy()
transform = {v: False for v in bqm.variables}
for ii in range(num_spin_reversal_transforms):
# flip each variable with a 50% chance
for v in bqm:
if random() > .5:
transform[v] = not transform[v]
flipped_bqm.flip_variable(v)
flipped_response = self.child.sample(flipped_bqm, **kwargs)
tf_idxs = [flipped_response.variables.index(v)
for v, flip in transform.items() if flip]
if bqm.vartype is Vartype.SPIN:
flipped_response.record.sample[:, tf_idxs] = -1 * flipped_response.record.sample[:, tf_idxs]
else:
flipped_response.record.sample[:, tf_idxs] = 1 - flipped_response.record.sample[:, tf_idxs]
responses.append(flipped_response)
return concatenate(responses) | python | def sample(self, bqm, num_spin_reversal_transforms=2, spin_reversal_variables=None, **kwargs):
"""Sample from the binary quadratic model.
Args:
bqm (:obj:`~dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
num_spin_reversal_transforms (integer, optional, default=2):
Number of spin reversal transform runs.
spin_reversal_variables (list/dict, optional):
Deprecated and no longer functional.
Returns:
:obj:`.SampleSet`
Examples:
This example runs 100 spin reversals applied to one variable of a QUBO problem.
>>> import dimod
...
>>> base_sampler = dimod.ExactSolver()
>>> composed_sampler = dimod.SpinReversalTransformComposite(base_sampler)
>>> Q = {('a', 'a'): -1, ('b', 'b'): -1, ('a', 'b'): 2}
>>> response = composed_sampler.sample_qubo(Q,
... num_spin_reversal_transforms=100,
... spin_reversal_variables={'a'})
>>> len(response)
400
>>> print(next(response.data())) # doctest: +SKIP
Sample(sample={'a': 0, 'b': 1}, energy=-1.0)
"""
if spin_reversal_variables is not None:
# this kwarg does not actually make sense for multiple SRTs. To
# get the same functionality a user should apply them by hand
# to their BQM before submitting.
import warnings
warnings.warn("'spin_reversal_variables' kwarg is deprecated and no longer functions.",
DeprecationWarning)
# make a main response
responses = []
flipped_bqm = bqm.copy()
transform = {v: False for v in bqm.variables}
for ii in range(num_spin_reversal_transforms):
# flip each variable with a 50% chance
for v in bqm:
if random() > .5:
transform[v] = not transform[v]
flipped_bqm.flip_variable(v)
flipped_response = self.child.sample(flipped_bqm, **kwargs)
tf_idxs = [flipped_response.variables.index(v)
for v, flip in transform.items() if flip]
if bqm.vartype is Vartype.SPIN:
flipped_response.record.sample[:, tf_idxs] = -1 * flipped_response.record.sample[:, tf_idxs]
else:
flipped_response.record.sample[:, tf_idxs] = 1 - flipped_response.record.sample[:, tf_idxs]
responses.append(flipped_response)
return concatenate(responses) | [
"def",
"sample",
"(",
"self",
",",
"bqm",
",",
"num_spin_reversal_transforms",
"=",
"2",
",",
"spin_reversal_variables",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"spin_reversal_variables",
"is",
"not",
"None",
":",
"# this kwarg does not actually make... | Sample from the binary quadratic model.
Args:
bqm (:obj:`~dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
num_spin_reversal_transforms (integer, optional, default=2):
Number of spin reversal transform runs.
spin_reversal_variables (list/dict, optional):
Deprecated and no longer functional.
Returns:
:obj:`.SampleSet`
Examples:
This example runs 100 spin reversals applied to one variable of a QUBO problem.
>>> import dimod
...
>>> base_sampler = dimod.ExactSolver()
>>> composed_sampler = dimod.SpinReversalTransformComposite(base_sampler)
>>> Q = {('a', 'a'): -1, ('b', 'b'): -1, ('a', 'b'): 2}
>>> response = composed_sampler.sample_qubo(Q,
... num_spin_reversal_transforms=100,
... spin_reversal_variables={'a'})
>>> len(response)
400
>>> print(next(response.data())) # doctest: +SKIP
Sample(sample={'a': 0, 'b': 1}, energy=-1.0) | [
"Sample",
"from",
"the",
"binary",
"quadratic",
"model",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/reference/composites/spin_transform.py#L87-L154 | train | 212,629 |
dwavesystems/dimod | dimod/serialization/format.py | _SampleTable.append_index | def append_index(self, num_rows):
"""Add an index column.
Left justified, width is determined by the space needed to print the
largest index.
"""
width = len(str(num_rows - 1))
def f(datum):
return str(datum.idx).ljust(width)
header = ' '*width
self.append(header, f) | python | def append_index(self, num_rows):
"""Add an index column.
Left justified, width is determined by the space needed to print the
largest index.
"""
width = len(str(num_rows - 1))
def f(datum):
return str(datum.idx).ljust(width)
header = ' '*width
self.append(header, f) | [
"def",
"append_index",
"(",
"self",
",",
"num_rows",
")",
":",
"width",
"=",
"len",
"(",
"str",
"(",
"num_rows",
"-",
"1",
")",
")",
"def",
"f",
"(",
"datum",
")",
":",
"return",
"str",
"(",
"datum",
".",
"idx",
")",
".",
"ljust",
"(",
"width",
... | Add an index column.
Left justified, width is determined by the space needed to print the
largest index. | [
"Add",
"an",
"index",
"column",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/serialization/format.py#L117-L129 | train | 212,630 |
dwavesystems/dimod | dimod/serialization/format.py | _SampleTable.append_sample | def append_sample(self, v, vartype, _left=False):
"""Add a sample column"""
vstr = str(v).rjust(2) # the variable will be len 0, or 1
length = len(vstr)
if vartype is dimod.SPIN:
def f(datum):
return _spinstr(datum.sample[v], rjust=length)
else:
def f(datum):
return _binarystr(datum.sample[v], rjust=length)
self.append(vstr, f, _left=_left) | python | def append_sample(self, v, vartype, _left=False):
"""Add a sample column"""
vstr = str(v).rjust(2) # the variable will be len 0, or 1
length = len(vstr)
if vartype is dimod.SPIN:
def f(datum):
return _spinstr(datum.sample[v], rjust=length)
else:
def f(datum):
return _binarystr(datum.sample[v], rjust=length)
self.append(vstr, f, _left=_left) | [
"def",
"append_sample",
"(",
"self",
",",
"v",
",",
"vartype",
",",
"_left",
"=",
"False",
")",
":",
"vstr",
"=",
"str",
"(",
"v",
")",
".",
"rjust",
"(",
"2",
")",
"# the variable will be len 0, or 1",
"length",
"=",
"len",
"(",
"vstr",
")",
"if",
"... | Add a sample column | [
"Add",
"a",
"sample",
"column"
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/serialization/format.py#L131-L143 | train | 212,631 |
dwavesystems/dimod | dimod/serialization/format.py | _SampleTable.append_vector | def append_vector(self, name, vector, _left=False):
"""Add a data vectors column."""
if np.issubdtype(vector.dtype, np.integer):
# determine the length we need
largest = str(max(vector.max(), vector.min(), key=abs))
length = max(len(largest), min(7, len(name))) # how many spaces we need to represent
if len(name) > length:
header = name[:length-1] + '.'
else:
header = name.rjust(length)
def f(datum):
return str(getattr(datum, name)).rjust(length)
elif np.issubdtype(vector.dtype, np.floating):
largest = np.format_float_positional(max(vector.max(), vector.min(), key=abs),
precision=6, trim='0')
length = max(len(largest), min(7, len(name))) # how many spaces we need to represent
if len(name) > length:
header = name[:length-1] + '.'
else:
header = name.rjust(length)
def f(datum):
return np.format_float_positional(getattr(datum, name),
precision=6, trim='0',
).rjust(length)
else:
length = 7
if len(name) > length:
header = name[:length-1] + '.'
else:
header = name.rjust(length)
def f(datum):
r = repr(getattr(datum, name))
if len(r) > length:
r = r[:length-3] + '...'
return r.rjust(length)
self.append(header, f, _left=_left) | python | def append_vector(self, name, vector, _left=False):
"""Add a data vectors column."""
if np.issubdtype(vector.dtype, np.integer):
# determine the length we need
largest = str(max(vector.max(), vector.min(), key=abs))
length = max(len(largest), min(7, len(name))) # how many spaces we need to represent
if len(name) > length:
header = name[:length-1] + '.'
else:
header = name.rjust(length)
def f(datum):
return str(getattr(datum, name)).rjust(length)
elif np.issubdtype(vector.dtype, np.floating):
largest = np.format_float_positional(max(vector.max(), vector.min(), key=abs),
precision=6, trim='0')
length = max(len(largest), min(7, len(name))) # how many spaces we need to represent
if len(name) > length:
header = name[:length-1] + '.'
else:
header = name.rjust(length)
def f(datum):
return np.format_float_positional(getattr(datum, name),
precision=6, trim='0',
).rjust(length)
else:
length = 7
if len(name) > length:
header = name[:length-1] + '.'
else:
header = name.rjust(length)
def f(datum):
r = repr(getattr(datum, name))
if len(r) > length:
r = r[:length-3] + '...'
return r.rjust(length)
self.append(header, f, _left=_left) | [
"def",
"append_vector",
"(",
"self",
",",
"name",
",",
"vector",
",",
"_left",
"=",
"False",
")",
":",
"if",
"np",
".",
"issubdtype",
"(",
"vector",
".",
"dtype",
",",
"np",
".",
"integer",
")",
":",
"# determine the length we need",
"largest",
"=",
"str... | Add a data vectors column. | [
"Add",
"a",
"data",
"vectors",
"column",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/serialization/format.py#L148-L188 | train | 212,632 |
dwavesystems/dimod | dimod/serialization/format.py | Formatter.format | def format(self, obj, **kwargs):
"""Return the formatted representation of the object as a string."""
sio = StringIO()
self.fprint(obj, stream=sio, **kwargs)
return sio.getvalue() | python | def format(self, obj, **kwargs):
"""Return the formatted representation of the object as a string."""
sio = StringIO()
self.fprint(obj, stream=sio, **kwargs)
return sio.getvalue() | [
"def",
"format",
"(",
"self",
",",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"sio",
"=",
"StringIO",
"(",
")",
"self",
".",
"fprint",
"(",
"obj",
",",
"stream",
"=",
"sio",
",",
"*",
"*",
"kwargs",
")",
"return",
"sio",
".",
"getvalue",
"(",
")... | Return the formatted representation of the object as a string. | [
"Return",
"the",
"formatted",
"representation",
"of",
"the",
"object",
"as",
"a",
"string",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/serialization/format.py#L244-L248 | train | 212,633 |
dwavesystems/dimod | dimod/serialization/format.py | Formatter.fprint | def fprint(self, obj, stream=None, **kwargs):
"""Prints the formatted representation of the object on stream"""
if stream is None:
stream = sys.stdout
options = self.options
options.update(kwargs)
if isinstance(obj, dimod.SampleSet):
self._print_sampleset(obj, stream, **options)
return
raise TypeError("cannot format type {}".format(type(obj))) | python | def fprint(self, obj, stream=None, **kwargs):
"""Prints the formatted representation of the object on stream"""
if stream is None:
stream = sys.stdout
options = self.options
options.update(kwargs)
if isinstance(obj, dimod.SampleSet):
self._print_sampleset(obj, stream, **options)
return
raise TypeError("cannot format type {}".format(type(obj))) | [
"def",
"fprint",
"(",
"self",
",",
"obj",
",",
"stream",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"stream",
"is",
"None",
":",
"stream",
"=",
"sys",
".",
"stdout",
"options",
"=",
"self",
".",
"options",
"options",
".",
"update",
"(",
... | Prints the formatted representation of the object on stream | [
"Prints",
"the",
"formatted",
"representation",
"of",
"the",
"object",
"on",
"stream"
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/serialization/format.py#L250-L262 | train | 212,634 |
dwavesystems/dimod | dimod/sampleset.py | as_samples | def as_samples(samples_like, dtype=None, copy=False, order='C'):
"""Convert a samples_like object to a NumPy array and list of labels.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like_ structure. See examples below.
dtype (data-type, optional):
dtype for the returned samples array. If not provided, it is either
derived from `samples_like`, if that object has a dtype, or set to
:class:`numpy.int8`.
copy (bool, optional, default=False):
If true, then samples_like is guaranteed to be copied, otherwise
it is only copied if necessary.
order ({'K', 'A', 'C', 'F'}, optional, default='C'):
Specify the memory layout of the array. See :func:`numpy.array`.
Returns:
tuple: A 2-tuple containing:
:obj:`numpy.ndarray`: Samples.
list: Variable labels
Examples:
The following examples convert a variety of samples_like objects:
NumPy arrays
>>> import numpy as np
...
>>> dimod.as_samples(np.ones(5, dtype='int8'))
(array([[1, 1, 1, 1, 1]], dtype=int8), [0, 1, 2, 3, 4])
>>> dimod.as_samples(np.zeros((5, 2), dtype='int8'))
(array([[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]], dtype=int8), [0, 1])
Lists
>>> dimod.as_samples([-1, +1, -1])
(array([[-1, 1, -1]], dtype=int8), [0, 1, 2])
>>> dimod.as_samples([[-1], [+1], [-1]])
(array([[-1],
[ 1],
[-1]], dtype=int8), [0])
Dicts
>>> dimod.as_samples({'a': 0, 'b': 1, 'c': 0}) # doctest: +SKIP
(array([[0, 1, 0]], dtype=int8), ['a', 'b', 'c'])
>>> dimod.as_samples([{'a': -1, 'b': +1}, {'a': 1, 'b': 1}]) # doctest: +SKIP
(array([[-1, 1],
[ 1, 1]], dtype=int8), ['a', 'b'])
A 2-tuple containing an array_like object and a list of labels
>>> dimod.as_samples(([-1, +1, -1], ['a', 'b', 'c']))
(array([[-1, 1, -1]], dtype=int8), ['a', 'b', 'c'])
>>> dimod.as_samples((np.zeros((5, 2), dtype='int8'), ['in', 'out']))
(array([[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]], dtype=int8), ['in', 'out'])
.. _array_like: https://docs.scipy.org/doc/numpy/user/basics.creation.html
"""
if isinstance(samples_like, SampleSet):
# we implicitely support this by handling an iterable of mapping but
# it is much faster to just do this here.
return samples_like.record.sample, list(samples_like.variables)
if isinstance(samples_like, tuple) and len(samples_like) == 2:
samples_like, labels = samples_like
if not isinstance(labels, list) and labels is not None:
labels = list(labels)
else:
labels = None
if isinstance(samples_like, abc.Iterator):
# if we don't check this case we can get unexpected behaviour where an
# iterator can be depleted
raise TypeError('samples_like cannot be an iterator')
if isinstance(samples_like, abc.Mapping):
return as_samples(([samples_like], labels), dtype=dtype)
if (isinstance(samples_like, list) and samples_like and
isinstance(samples_like[0], numbers.Number)):
# this is not actually necessary but it speeds up the
# samples_like = [1, 0, 1,...] case significantly
return as_samples(([samples_like], labels), dtype=dtype)
if not isinstance(samples_like, np.ndarray):
if any(isinstance(sample, abc.Mapping) for sample in samples_like):
# go through samples-like, turning the dicts into lists
samples_like, old = list(samples_like), samples_like
if labels is None:
first = samples_like[0]
if isinstance(first, abc.Mapping):
labels = list(first)
else:
labels = list(range(len(first)))
for idx, sample in enumerate(old):
if isinstance(sample, abc.Mapping):
try:
samples_like[idx] = [sample[v] for v in labels]
except KeyError:
raise ValueError("samples_like and labels do not match")
if dtype is None and not hasattr(samples_like, 'dtype'):
dtype = np.int8
# samples-like should now be array-like
arr = np.array(samples_like, dtype=dtype, copy=copy, order=order)
if arr.ndim > 2:
raise ValueError("expected samples_like to be <= 2 dimensions")
if arr.ndim < 2:
if arr.size:
arr = np.atleast_2d(arr)
elif labels: # is not None and len > 0
arr = arr.reshape((0, len(labels)))
else:
arr = arr.reshape((0, 0))
# ok we're basically done, just need to check against the labels
if labels is None:
return arr, list(range(arr.shape[1]))
elif len(labels) != arr.shape[1]:
raise ValueError("samples_like and labels dimensions do not match")
else:
return arr, labels | python | def as_samples(samples_like, dtype=None, copy=False, order='C'):
"""Convert a samples_like object to a NumPy array and list of labels.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like_ structure. See examples below.
dtype (data-type, optional):
dtype for the returned samples array. If not provided, it is either
derived from `samples_like`, if that object has a dtype, or set to
:class:`numpy.int8`.
copy (bool, optional, default=False):
If true, then samples_like is guaranteed to be copied, otherwise
it is only copied if necessary.
order ({'K', 'A', 'C', 'F'}, optional, default='C'):
Specify the memory layout of the array. See :func:`numpy.array`.
Returns:
tuple: A 2-tuple containing:
:obj:`numpy.ndarray`: Samples.
list: Variable labels
Examples:
The following examples convert a variety of samples_like objects:
NumPy arrays
>>> import numpy as np
...
>>> dimod.as_samples(np.ones(5, dtype='int8'))
(array([[1, 1, 1, 1, 1]], dtype=int8), [0, 1, 2, 3, 4])
>>> dimod.as_samples(np.zeros((5, 2), dtype='int8'))
(array([[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]], dtype=int8), [0, 1])
Lists
>>> dimod.as_samples([-1, +1, -1])
(array([[-1, 1, -1]], dtype=int8), [0, 1, 2])
>>> dimod.as_samples([[-1], [+1], [-1]])
(array([[-1],
[ 1],
[-1]], dtype=int8), [0])
Dicts
>>> dimod.as_samples({'a': 0, 'b': 1, 'c': 0}) # doctest: +SKIP
(array([[0, 1, 0]], dtype=int8), ['a', 'b', 'c'])
>>> dimod.as_samples([{'a': -1, 'b': +1}, {'a': 1, 'b': 1}]) # doctest: +SKIP
(array([[-1, 1],
[ 1, 1]], dtype=int8), ['a', 'b'])
A 2-tuple containing an array_like object and a list of labels
>>> dimod.as_samples(([-1, +1, -1], ['a', 'b', 'c']))
(array([[-1, 1, -1]], dtype=int8), ['a', 'b', 'c'])
>>> dimod.as_samples((np.zeros((5, 2), dtype='int8'), ['in', 'out']))
(array([[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]], dtype=int8), ['in', 'out'])
.. _array_like: https://docs.scipy.org/doc/numpy/user/basics.creation.html
"""
if isinstance(samples_like, SampleSet):
# we implicitely support this by handling an iterable of mapping but
# it is much faster to just do this here.
return samples_like.record.sample, list(samples_like.variables)
if isinstance(samples_like, tuple) and len(samples_like) == 2:
samples_like, labels = samples_like
if not isinstance(labels, list) and labels is not None:
labels = list(labels)
else:
labels = None
if isinstance(samples_like, abc.Iterator):
# if we don't check this case we can get unexpected behaviour where an
# iterator can be depleted
raise TypeError('samples_like cannot be an iterator')
if isinstance(samples_like, abc.Mapping):
return as_samples(([samples_like], labels), dtype=dtype)
if (isinstance(samples_like, list) and samples_like and
isinstance(samples_like[0], numbers.Number)):
# this is not actually necessary but it speeds up the
# samples_like = [1, 0, 1,...] case significantly
return as_samples(([samples_like], labels), dtype=dtype)
if not isinstance(samples_like, np.ndarray):
if any(isinstance(sample, abc.Mapping) for sample in samples_like):
# go through samples-like, turning the dicts into lists
samples_like, old = list(samples_like), samples_like
if labels is None:
first = samples_like[0]
if isinstance(first, abc.Mapping):
labels = list(first)
else:
labels = list(range(len(first)))
for idx, sample in enumerate(old):
if isinstance(sample, abc.Mapping):
try:
samples_like[idx] = [sample[v] for v in labels]
except KeyError:
raise ValueError("samples_like and labels do not match")
if dtype is None and not hasattr(samples_like, 'dtype'):
dtype = np.int8
# samples-like should now be array-like
arr = np.array(samples_like, dtype=dtype, copy=copy, order=order)
if arr.ndim > 2:
raise ValueError("expected samples_like to be <= 2 dimensions")
if arr.ndim < 2:
if arr.size:
arr = np.atleast_2d(arr)
elif labels: # is not None and len > 0
arr = arr.reshape((0, len(labels)))
else:
arr = arr.reshape((0, 0))
# ok we're basically done, just need to check against the labels
if labels is None:
return arr, list(range(arr.shape[1]))
elif len(labels) != arr.shape[1]:
raise ValueError("samples_like and labels dimensions do not match")
else:
return arr, labels | [
"def",
"as_samples",
"(",
"samples_like",
",",
"dtype",
"=",
"None",
",",
"copy",
"=",
"False",
",",
"order",
"=",
"'C'",
")",
":",
"if",
"isinstance",
"(",
"samples_like",
",",
"SampleSet",
")",
":",
"# we implicitely support this by handling an iterable of mappi... | Convert a samples_like object to a NumPy array and list of labels.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like_ structure. See examples below.
dtype (data-type, optional):
dtype for the returned samples array. If not provided, it is either
derived from `samples_like`, if that object has a dtype, or set to
:class:`numpy.int8`.
copy (bool, optional, default=False):
If true, then samples_like is guaranteed to be copied, otherwise
it is only copied if necessary.
order ({'K', 'A', 'C', 'F'}, optional, default='C'):
Specify the memory layout of the array. See :func:`numpy.array`.
Returns:
tuple: A 2-tuple containing:
:obj:`numpy.ndarray`: Samples.
list: Variable labels
Examples:
The following examples convert a variety of samples_like objects:
NumPy arrays
>>> import numpy as np
...
>>> dimod.as_samples(np.ones(5, dtype='int8'))
(array([[1, 1, 1, 1, 1]], dtype=int8), [0, 1, 2, 3, 4])
>>> dimod.as_samples(np.zeros((5, 2), dtype='int8'))
(array([[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]], dtype=int8), [0, 1])
Lists
>>> dimod.as_samples([-1, +1, -1])
(array([[-1, 1, -1]], dtype=int8), [0, 1, 2])
>>> dimod.as_samples([[-1], [+1], [-1]])
(array([[-1],
[ 1],
[-1]], dtype=int8), [0])
Dicts
>>> dimod.as_samples({'a': 0, 'b': 1, 'c': 0}) # doctest: +SKIP
(array([[0, 1, 0]], dtype=int8), ['a', 'b', 'c'])
>>> dimod.as_samples([{'a': -1, 'b': +1}, {'a': 1, 'b': 1}]) # doctest: +SKIP
(array([[-1, 1],
[ 1, 1]], dtype=int8), ['a', 'b'])
A 2-tuple containing an array_like object and a list of labels
>>> dimod.as_samples(([-1, +1, -1], ['a', 'b', 'c']))
(array([[-1, 1, -1]], dtype=int8), ['a', 'b', 'c'])
>>> dimod.as_samples((np.zeros((5, 2), dtype='int8'), ['in', 'out']))
(array([[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]], dtype=int8), ['in', 'out'])
.. _array_like: https://docs.scipy.org/doc/numpy/user/basics.creation.html | [
"Convert",
"a",
"samples_like",
"object",
"to",
"a",
"NumPy",
"array",
"and",
"list",
"of",
"labels",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/sampleset.py#L44-L186 | train | 212,635 |
dwavesystems/dimod | dimod/sampleset.py | concatenate | def concatenate(samplesets, defaults=None):
"""Combine SampleSets.
Args:
samplesets (iterable[:obj:`.SampleSet`):
An iterable of sample sets.
defaults (dict, optional):
Dictionary mapping data vector names to the corresponding default values.
Returns:
:obj:`.SampleSet`: A sample set with the same vartype and variable order as the first
given in `samplesets`.
Examples:
>>> a = dimod.SampleSet.from_samples(([-1, +1], 'ab'), dimod.SPIN, energy=-1)
>>> b = dimod.SampleSet.from_samples(([-1, +1], 'ba'), dimod.SPIN, energy=-1)
>>> ab = dimod.concatenate((a, b))
>>> ab.record.sample
array([[-1, 1],
[ 1, -1]], dtype=int8)
"""
itertup = iter(samplesets)
try:
first = next(itertup)
except StopIteration:
raise ValueError("samplesets must contain at least one SampleSet")
vartype = first.vartype
variables = first.variables
records = [first.record]
records.extend(_iter_records(itertup, vartype, variables))
# dev note: I was able to get ~2x performance boost when trying to
# implement the same functionality here by hand (I didn't know that
# this function existed then). However I think it is better to use
# numpy's function and rely on their testing etc. If however this becomes
# a performance bottleneck in the future, it might be worth changing.
record = recfunctions.stack_arrays(records, defaults=defaults,
asrecarray=True, usemask=False)
return SampleSet(record, variables, {}, vartype) | python | def concatenate(samplesets, defaults=None):
"""Combine SampleSets.
Args:
samplesets (iterable[:obj:`.SampleSet`):
An iterable of sample sets.
defaults (dict, optional):
Dictionary mapping data vector names to the corresponding default values.
Returns:
:obj:`.SampleSet`: A sample set with the same vartype and variable order as the first
given in `samplesets`.
Examples:
>>> a = dimod.SampleSet.from_samples(([-1, +1], 'ab'), dimod.SPIN, energy=-1)
>>> b = dimod.SampleSet.from_samples(([-1, +1], 'ba'), dimod.SPIN, energy=-1)
>>> ab = dimod.concatenate((a, b))
>>> ab.record.sample
array([[-1, 1],
[ 1, -1]], dtype=int8)
"""
itertup = iter(samplesets)
try:
first = next(itertup)
except StopIteration:
raise ValueError("samplesets must contain at least one SampleSet")
vartype = first.vartype
variables = first.variables
records = [first.record]
records.extend(_iter_records(itertup, vartype, variables))
# dev note: I was able to get ~2x performance boost when trying to
# implement the same functionality here by hand (I didn't know that
# this function existed then). However I think it is better to use
# numpy's function and rely on their testing etc. If however this becomes
# a performance bottleneck in the future, it might be worth changing.
record = recfunctions.stack_arrays(records, defaults=defaults,
asrecarray=True, usemask=False)
return SampleSet(record, variables, {}, vartype) | [
"def",
"concatenate",
"(",
"samplesets",
",",
"defaults",
"=",
"None",
")",
":",
"itertup",
"=",
"iter",
"(",
"samplesets",
")",
"try",
":",
"first",
"=",
"next",
"(",
"itertup",
")",
"except",
"StopIteration",
":",
"raise",
"ValueError",
"(",
"\"sampleset... | Combine SampleSets.
Args:
samplesets (iterable[:obj:`.SampleSet`):
An iterable of sample sets.
defaults (dict, optional):
Dictionary mapping data vector names to the corresponding default values.
Returns:
:obj:`.SampleSet`: A sample set with the same vartype and variable order as the first
given in `samplesets`.
Examples:
>>> a = dimod.SampleSet.from_samples(([-1, +1], 'ab'), dimod.SPIN, energy=-1)
>>> b = dimod.SampleSet.from_samples(([-1, +1], 'ba'), dimod.SPIN, energy=-1)
>>> ab = dimod.concatenate((a, b))
>>> ab.record.sample
array([[-1, 1],
[ 1, -1]], dtype=int8) | [
"Combine",
"SampleSets",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/sampleset.py#L189-L234 | train | 212,636 |
dwavesystems/dimod | dimod/sampleset.py | SampleSet.from_samples_bqm | def from_samples_bqm(cls, samples_like, bqm, **kwargs):
"""Build a SampleSet from raw samples using a BinaryQuadraticModel to get energies and vartype.
Args:
samples_like:
A collection of raw samples. 'samples_like' is an extension of NumPy's array_like.
See :func:`.as_samples`.
bqm (:obj:`.BinaryQuadraticModel`):
A binary quadratic model. It is used to calculate the energies
and set the vartype.
info (dict, optional):
Information about the :class:`SampleSet` as a whole formatted as a dict.
num_occurrences (array_like, optional):
Number of occurrences for each sample. If not provided, defaults to a vector of 1s.
aggregate_samples (bool, optional, default=False):
If true, returned :obj:`.SampleSet` will have all unique samples.
sort_labels (bool, optional, default=True):
If true, :attr:`.SampleSet.variables` will be in sorted-order.
Note that mixed types are not sortable in which case the given
order will be maintained.
**vectors (array_like):
Other per-sample data.
Returns:
:obj:`.SampleSet`
Examples:
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): -1})
>>> samples = dimod.SampleSet.from_samples_bqm({'a': -1, 'b': 1}, bqm)
"""
# more performant to do this once, here rather than again in bqm.energies
# and in cls.from_samples
samples_like = as_samples(samples_like)
energies = bqm.energies(samples_like)
return cls.from_samples(samples_like, energy=energies, vartype=bqm.vartype, **kwargs) | python | def from_samples_bqm(cls, samples_like, bqm, **kwargs):
"""Build a SampleSet from raw samples using a BinaryQuadraticModel to get energies and vartype.
Args:
samples_like:
A collection of raw samples. 'samples_like' is an extension of NumPy's array_like.
See :func:`.as_samples`.
bqm (:obj:`.BinaryQuadraticModel`):
A binary quadratic model. It is used to calculate the energies
and set the vartype.
info (dict, optional):
Information about the :class:`SampleSet` as a whole formatted as a dict.
num_occurrences (array_like, optional):
Number of occurrences for each sample. If not provided, defaults to a vector of 1s.
aggregate_samples (bool, optional, default=False):
If true, returned :obj:`.SampleSet` will have all unique samples.
sort_labels (bool, optional, default=True):
If true, :attr:`.SampleSet.variables` will be in sorted-order.
Note that mixed types are not sortable in which case the given
order will be maintained.
**vectors (array_like):
Other per-sample data.
Returns:
:obj:`.SampleSet`
Examples:
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): -1})
>>> samples = dimod.SampleSet.from_samples_bqm({'a': -1, 'b': 1}, bqm)
"""
# more performant to do this once, here rather than again in bqm.energies
# and in cls.from_samples
samples_like = as_samples(samples_like)
energies = bqm.energies(samples_like)
return cls.from_samples(samples_like, energy=energies, vartype=bqm.vartype, **kwargs) | [
"def",
"from_samples_bqm",
"(",
"cls",
",",
"samples_like",
",",
"bqm",
",",
"*",
"*",
"kwargs",
")",
":",
"# more performant to do this once, here rather than again in bqm.energies",
"# and in cls.from_samples",
"samples_like",
"=",
"as_samples",
"(",
"samples_like",
")",
... | Build a SampleSet from raw samples using a BinaryQuadraticModel to get energies and vartype.
Args:
samples_like:
A collection of raw samples. 'samples_like' is an extension of NumPy's array_like.
See :func:`.as_samples`.
bqm (:obj:`.BinaryQuadraticModel`):
A binary quadratic model. It is used to calculate the energies
and set the vartype.
info (dict, optional):
Information about the :class:`SampleSet` as a whole formatted as a dict.
num_occurrences (array_like, optional):
Number of occurrences for each sample. If not provided, defaults to a vector of 1s.
aggregate_samples (bool, optional, default=False):
If true, returned :obj:`.SampleSet` will have all unique samples.
sort_labels (bool, optional, default=True):
If true, :attr:`.SampleSet.variables` will be in sorted-order.
Note that mixed types are not sortable in which case the given
order will be maintained.
**vectors (array_like):
Other per-sample data.
Returns:
:obj:`.SampleSet`
Examples:
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): -1})
>>> samples = dimod.SampleSet.from_samples_bqm({'a': -1, 'b': 1}, bqm) | [
"Build",
"a",
"SampleSet",
"from",
"raw",
"samples",
"using",
"a",
"BinaryQuadraticModel",
"to",
"get",
"energies",
"and",
"vartype",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/sampleset.py#L435-L479 | train | 212,637 |
dwavesystems/dimod | dimod/sampleset.py | SampleSet.data_vectors | def data_vectors(self):
"""The per-sample data in a vector.
Returns:
dict: A dict where the keys are the fields in the record and the
values are the corresponding arrays.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.data_vectors['energy']
array([-1, 1])
Note that this is equivalent to, and less performant than:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.record['energy']
array([-1, 1])
"""
return {field: self.record[field] for field in self.record.dtype.names
if field != 'sample'} | python | def data_vectors(self):
"""The per-sample data in a vector.
Returns:
dict: A dict where the keys are the fields in the record and the
values are the corresponding arrays.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.data_vectors['energy']
array([-1, 1])
Note that this is equivalent to, and less performant than:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.record['energy']
array([-1, 1])
"""
return {field: self.record[field] for field in self.record.dtype.names
if field != 'sample'} | [
"def",
"data_vectors",
"(",
"self",
")",
":",
"return",
"{",
"field",
":",
"self",
".",
"record",
"[",
"field",
"]",
"for",
"field",
"in",
"self",
".",
"record",
".",
"dtype",
".",
"names",
"if",
"field",
"!=",
"'sample'",
"}"
] | The per-sample data in a vector.
Returns:
dict: A dict where the keys are the fields in the record and the
values are the corresponding arrays.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.data_vectors['energy']
array([-1, 1])
Note that this is equivalent to, and less performant than:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.record['energy']
array([-1, 1]) | [
"The",
"per",
"-",
"sample",
"data",
"in",
"a",
"vector",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/sampleset.py#L598-L621 | train | 212,638 |
dwavesystems/dimod | dimod/sampleset.py | SampleSet.first | def first(self):
"""Sample with the lowest-energy.
Raises:
ValueError: If empty.
Example:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 1}, {('a', 'b'): 1})
>>> sampleset.first
Sample(sample={'a': -1, 'b': 1}, energy=-2.0, num_occurrences=1)
"""
try:
return next(self.data(sorted_by='energy', name='Sample'))
except StopIteration:
raise ValueError('{} is empty'.format(self.__class__.__name__)) | python | def first(self):
"""Sample with the lowest-energy.
Raises:
ValueError: If empty.
Example:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 1}, {('a', 'b'): 1})
>>> sampleset.first
Sample(sample={'a': -1, 'b': 1}, energy=-2.0, num_occurrences=1)
"""
try:
return next(self.data(sorted_by='energy', name='Sample'))
except StopIteration:
raise ValueError('{} is empty'.format(self.__class__.__name__)) | [
"def",
"first",
"(",
"self",
")",
":",
"try",
":",
"return",
"next",
"(",
"self",
".",
"data",
"(",
"sorted_by",
"=",
"'energy'",
",",
"name",
"=",
"'Sample'",
")",
")",
"except",
"StopIteration",
":",
"raise",
"ValueError",
"(",
"'{} is empty'",
".",
... | Sample with the lowest-energy.
Raises:
ValueError: If empty.
Example:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 1}, {('a', 'b'): 1})
>>> sampleset.first
Sample(sample={'a': -1, 'b': 1}, energy=-2.0, num_occurrences=1) | [
"Sample",
"with",
"the",
"lowest",
"-",
"energy",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/sampleset.py#L624-L640 | train | 212,639 |
dwavesystems/dimod | dimod/sampleset.py | SampleSet.done | def done(self):
"""Return True if a pending computation is done.
Used when a :class:`SampleSet` is constructed with :meth:`SampleSet.from_future`.
Examples:
This example uses a :class:`~concurrent.futures.Future` object directly. Typically
a :class:`~concurrent.futures.Executor` sets the result of the future
(see documentation for :mod:`concurrent.futures`).
>>> import dimod
>>> from concurrent.futures import Future
...
>>> future = Future()
>>> sampleset = dimod.SampleSet.from_future(future)
>>> future.done()
False
>>> future.set_result(dimod.ExactSolver().sample_ising({0: -1}, {}))
>>> future.done()
True
>>> sampleset.record.sample
array([[-1],
[ 1]], dtype=int8)
"""
return (not hasattr(self, '_future')) or (not hasattr(self._future, 'done')) or self._future.done() | python | def done(self):
"""Return True if a pending computation is done.
Used when a :class:`SampleSet` is constructed with :meth:`SampleSet.from_future`.
Examples:
This example uses a :class:`~concurrent.futures.Future` object directly. Typically
a :class:`~concurrent.futures.Executor` sets the result of the future
(see documentation for :mod:`concurrent.futures`).
>>> import dimod
>>> from concurrent.futures import Future
...
>>> future = Future()
>>> sampleset = dimod.SampleSet.from_future(future)
>>> future.done()
False
>>> future.set_result(dimod.ExactSolver().sample_ising({0: -1}, {}))
>>> future.done()
True
>>> sampleset.record.sample
array([[-1],
[ 1]], dtype=int8)
"""
return (not hasattr(self, '_future')) or (not hasattr(self._future, 'done')) or self._future.done() | [
"def",
"done",
"(",
"self",
")",
":",
"return",
"(",
"not",
"hasattr",
"(",
"self",
",",
"'_future'",
")",
")",
"or",
"(",
"not",
"hasattr",
"(",
"self",
".",
"_future",
",",
"'done'",
")",
")",
"or",
"self",
".",
"_future",
".",
"done",
"(",
")"... | Return True if a pending computation is done.
Used when a :class:`SampleSet` is constructed with :meth:`SampleSet.from_future`.
Examples:
This example uses a :class:`~concurrent.futures.Future` object directly. Typically
a :class:`~concurrent.futures.Executor` sets the result of the future
(see documentation for :mod:`concurrent.futures`).
>>> import dimod
>>> from concurrent.futures import Future
...
>>> future = Future()
>>> sampleset = dimod.SampleSet.from_future(future)
>>> future.done()
False
>>> future.set_result(dimod.ExactSolver().sample_ising({0: -1}, {}))
>>> future.done()
True
>>> sampleset.record.sample
array([[-1],
[ 1]], dtype=int8) | [
"Return",
"True",
"if",
"a",
"pending",
"computation",
"is",
"done",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/sampleset.py#L697-L722 | train | 212,640 |
dwavesystems/dimod | dimod/sampleset.py | SampleSet.samples | def samples(self, n=None, sorted_by='energy'):
"""Return an iterable over the samples.
Args:
n (int, optional, default=None):
Maximum number of samples to return in the view.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples. If None,
samples are returned in record order.
Returns:
:obj:`.SamplesArray`: A view object mapping variable labels to
values.
Examples:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 0.1, 'b': 0.0},
... {('a', 'b'): 1})
>>> for sample in sampleset.samples():
... print(sample)
{'a': -1, 'b': 1}
{'a': 1, 'b': -1}
{'a': -1, 'b': -1}
{'a': 1, 'b': 1}
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 0.1, 'b': 0.0},
... {('a', 'b'): 1})
>>> samples = sampleset.samples()
>>> samples[0]
{'a': -1, 'b': 1}
>>> samples[0, 'a']
-1
>>> samples[0, ['b', 'a']]
array([ 1, -1], dtype=int8)
>>> samples[1:, ['a', 'b']]
array([[ 1, -1],
[-1, -1],
[ 1, 1]], dtype=int8)
"""
if n is not None:
return self.samples(sorted_by=sorted_by)[:n]
if sorted_by is None:
samples = self.record.sample
else:
order = np.argsort(self.record[sorted_by])
samples = self.record.sample[order]
return SamplesArray(samples, self.variables) | python | def samples(self, n=None, sorted_by='energy'):
"""Return an iterable over the samples.
Args:
n (int, optional, default=None):
Maximum number of samples to return in the view.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples. If None,
samples are returned in record order.
Returns:
:obj:`.SamplesArray`: A view object mapping variable labels to
values.
Examples:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 0.1, 'b': 0.0},
... {('a', 'b'): 1})
>>> for sample in sampleset.samples():
... print(sample)
{'a': -1, 'b': 1}
{'a': 1, 'b': -1}
{'a': -1, 'b': -1}
{'a': 1, 'b': 1}
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 0.1, 'b': 0.0},
... {('a', 'b'): 1})
>>> samples = sampleset.samples()
>>> samples[0]
{'a': -1, 'b': 1}
>>> samples[0, 'a']
-1
>>> samples[0, ['b', 'a']]
array([ 1, -1], dtype=int8)
>>> samples[1:, ['a', 'b']]
array([[ 1, -1],
[-1, -1],
[ 1, 1]], dtype=int8)
"""
if n is not None:
return self.samples(sorted_by=sorted_by)[:n]
if sorted_by is None:
samples = self.record.sample
else:
order = np.argsort(self.record[sorted_by])
samples = self.record.sample[order]
return SamplesArray(samples, self.variables) | [
"def",
"samples",
"(",
"self",
",",
"n",
"=",
"None",
",",
"sorted_by",
"=",
"'energy'",
")",
":",
"if",
"n",
"is",
"not",
"None",
":",
"return",
"self",
".",
"samples",
"(",
"sorted_by",
"=",
"sorted_by",
")",
"[",
":",
"n",
"]",
"if",
"sorted_by"... | Return an iterable over the samples.
Args:
n (int, optional, default=None):
Maximum number of samples to return in the view.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples. If None,
samples are returned in record order.
Returns:
:obj:`.SamplesArray`: A view object mapping variable labels to
values.
Examples:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 0.1, 'b': 0.0},
... {('a', 'b'): 1})
>>> for sample in sampleset.samples():
... print(sample)
{'a': -1, 'b': 1}
{'a': 1, 'b': -1}
{'a': -1, 'b': -1}
{'a': 1, 'b': 1}
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 0.1, 'b': 0.0},
... {('a', 'b'): 1})
>>> samples = sampleset.samples()
>>> samples[0]
{'a': -1, 'b': 1}
>>> samples[0, 'a']
-1
>>> samples[0, ['b', 'a']]
array([ 1, -1], dtype=int8)
>>> samples[1:, ['a', 'b']]
array([[ 1, -1],
[-1, -1],
[ 1, 1]], dtype=int8) | [
"Return",
"an",
"iterable",
"over",
"the",
"samples",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/sampleset.py#L724-L774 | train | 212,641 |
dwavesystems/dimod | dimod/sampleset.py | SampleSet.copy | def copy(self):
"""Create a shallow copy."""
return self.__class__(self.record.copy(),
self.variables, # a new one is made in all cases
self.info.copy(),
self.vartype) | python | def copy(self):
"""Create a shallow copy."""
return self.__class__(self.record.copy(),
self.variables, # a new one is made in all cases
self.info.copy(),
self.vartype) | [
"def",
"copy",
"(",
"self",
")",
":",
"return",
"self",
".",
"__class__",
"(",
"self",
".",
"record",
".",
"copy",
"(",
")",
",",
"self",
".",
"variables",
",",
"# a new one is made in all cases",
"self",
".",
"info",
".",
"copy",
"(",
")",
",",
"self"... | Create a shallow copy. | [
"Create",
"a",
"shallow",
"copy",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/sampleset.py#L880-L885 | train | 212,642 |
dwavesystems/dimod | dimod/sampleset.py | SampleSet.aggregate | def aggregate(self):
"""Create a new SampleSet with repeated samples aggregated.
Returns:
:obj:`.SampleSet`
Note:
:attr:`.SampleSet.record.num_occurrences` are accumulated but no
other fields are.
"""
_, indices, inverse = np.unique(self.record.sample, axis=0,
return_index=True, return_inverse=True)
# unique also sorts the array which we don't want, so we undo the sort
order = np.argsort(indices)
indices = indices[order]
record = self.record[indices]
# fix the number of occurrences
record.num_occurrences = 0
for old_idx, new_idx in enumerate(inverse):
new_idx = order[new_idx]
record[new_idx].num_occurrences += self.record[old_idx].num_occurrences
# dev note: we don't check the energies as they should be the same
# for individual samples
return type(self)(record, self.variables, copy.deepcopy(self.info),
self.vartype) | python | def aggregate(self):
"""Create a new SampleSet with repeated samples aggregated.
Returns:
:obj:`.SampleSet`
Note:
:attr:`.SampleSet.record.num_occurrences` are accumulated but no
other fields are.
"""
_, indices, inverse = np.unique(self.record.sample, axis=0,
return_index=True, return_inverse=True)
# unique also sorts the array which we don't want, so we undo the sort
order = np.argsort(indices)
indices = indices[order]
record = self.record[indices]
# fix the number of occurrences
record.num_occurrences = 0
for old_idx, new_idx in enumerate(inverse):
new_idx = order[new_idx]
record[new_idx].num_occurrences += self.record[old_idx].num_occurrences
# dev note: we don't check the energies as they should be the same
# for individual samples
return type(self)(record, self.variables, copy.deepcopy(self.info),
self.vartype) | [
"def",
"aggregate",
"(",
"self",
")",
":",
"_",
",",
"indices",
",",
"inverse",
"=",
"np",
".",
"unique",
"(",
"self",
".",
"record",
".",
"sample",
",",
"axis",
"=",
"0",
",",
"return_index",
"=",
"True",
",",
"return_inverse",
"=",
"True",
")",
"... | Create a new SampleSet with repeated samples aggregated.
Returns:
:obj:`.SampleSet`
Note:
:attr:`.SampleSet.record.num_occurrences` are accumulated but no
other fields are. | [
"Create",
"a",
"new",
"SampleSet",
"with",
"repeated",
"samples",
"aggregated",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/sampleset.py#L979-L1009 | train | 212,643 |
dwavesystems/dimod | dimod/sampleset.py | SampleSet.append_variables | def append_variables(self, samples_like, sort_labels=True):
"""Create a new sampleset with the given variables with values added.
Not defined for empty sample sets. Note that when `sample_like` is
a :obj:`.SampleSet`, the data vectors and info are ignored.
Args:
samples_like:
Samples to add to the sample set. Should either be a single
sample or should match the length of the sample set. See
:func:`.as_samples` for what is allowed to be `samples_like`.
sort_labels (bool, optional, default=True):
If true, returned :attr:`.SampleSet.variables` will be in
sorted-order. Note that mixed types are not sortable in which
case the given order will be maintained.
Returns:
:obj:`.SampleSet`: A new sample set with the variables/values added.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([{'a': -1, 'b': +1},
... {'a': +1, 'b': +1}],
... dimod.SPIN,
... energy=[-1.0, 1.0])
>>> new = sampleset.append_variables({'c': -1})
>>> print(new)
a b c energy num_oc.
0 -1 +1 -1 -1.0 1
1 +1 +1 -1 1.0 1
['SPIN', 2 rows, 2 samples, 3 variables]
Add variables from another sampleset to the original above. Note
that the energies do not change.
>>> another = dimod.SampleSet.from_samples([{'c': -1, 'd': +1},
... {'c': +1, 'd': +1}],
... dimod.SPIN,
... energy=[-2.0, 1.0])
>>> new = sampleset.append_variables(another)
>>> print(new)
a b c d energy num_oc.
0 -1 +1 -1 +1 -1.0 1
1 +1 +1 +1 +1 1.0 1
['SPIN', 2 rows, 2 samples, 4 variables]
"""
samples, labels = as_samples(samples_like)
num_samples = len(self)
# we don't handle multiple values
if samples.shape[0] == num_samples:
# we don't need to do anything, it's already the correct shape
pass
elif samples.shape[0] == 1 and num_samples:
samples = np.repeat(samples, num_samples, axis=0)
else:
msg = ("mismatched shape. The samples to append should either be "
"a single sample or should match the length of the sample "
"set. Empty sample sets cannot be appended to.")
raise ValueError(msg)
# append requires the new variables to be unique
variables = self.variables
if any(v in variables for v in labels):
msg = "Appended samples cannot contain variables in sample set"
raise ValueError(msg)
new_variables = list(variables) + labels
new_samples = np.hstack((self.record.sample, samples))
return type(self).from_samples((new_samples, new_variables),
self.vartype,
info=copy.deepcopy(self.info), # make a copy
sort_labels=sort_labels,
**self.data_vectors) | python | def append_variables(self, samples_like, sort_labels=True):
"""Create a new sampleset with the given variables with values added.
Not defined for empty sample sets. Note that when `sample_like` is
a :obj:`.SampleSet`, the data vectors and info are ignored.
Args:
samples_like:
Samples to add to the sample set. Should either be a single
sample or should match the length of the sample set. See
:func:`.as_samples` for what is allowed to be `samples_like`.
sort_labels (bool, optional, default=True):
If true, returned :attr:`.SampleSet.variables` will be in
sorted-order. Note that mixed types are not sortable in which
case the given order will be maintained.
Returns:
:obj:`.SampleSet`: A new sample set with the variables/values added.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([{'a': -1, 'b': +1},
... {'a': +1, 'b': +1}],
... dimod.SPIN,
... energy=[-1.0, 1.0])
>>> new = sampleset.append_variables({'c': -1})
>>> print(new)
a b c energy num_oc.
0 -1 +1 -1 -1.0 1
1 +1 +1 -1 1.0 1
['SPIN', 2 rows, 2 samples, 3 variables]
Add variables from another sampleset to the original above. Note
that the energies do not change.
>>> another = dimod.SampleSet.from_samples([{'c': -1, 'd': +1},
... {'c': +1, 'd': +1}],
... dimod.SPIN,
... energy=[-2.0, 1.0])
>>> new = sampleset.append_variables(another)
>>> print(new)
a b c d energy num_oc.
0 -1 +1 -1 +1 -1.0 1
1 +1 +1 +1 +1 1.0 1
['SPIN', 2 rows, 2 samples, 4 variables]
"""
samples, labels = as_samples(samples_like)
num_samples = len(self)
# we don't handle multiple values
if samples.shape[0] == num_samples:
# we don't need to do anything, it's already the correct shape
pass
elif samples.shape[0] == 1 and num_samples:
samples = np.repeat(samples, num_samples, axis=0)
else:
msg = ("mismatched shape. The samples to append should either be "
"a single sample or should match the length of the sample "
"set. Empty sample sets cannot be appended to.")
raise ValueError(msg)
# append requires the new variables to be unique
variables = self.variables
if any(v in variables for v in labels):
msg = "Appended samples cannot contain variables in sample set"
raise ValueError(msg)
new_variables = list(variables) + labels
new_samples = np.hstack((self.record.sample, samples))
return type(self).from_samples((new_samples, new_variables),
self.vartype,
info=copy.deepcopy(self.info), # make a copy
sort_labels=sort_labels,
**self.data_vectors) | [
"def",
"append_variables",
"(",
"self",
",",
"samples_like",
",",
"sort_labels",
"=",
"True",
")",
":",
"samples",
",",
"labels",
"=",
"as_samples",
"(",
"samples_like",
")",
"num_samples",
"=",
"len",
"(",
"self",
")",
"# we don't handle multiple values",
"if",... | Create a new sampleset with the given variables with values added.
Not defined for empty sample sets. Note that when `sample_like` is
a :obj:`.SampleSet`, the data vectors and info are ignored.
Args:
samples_like:
Samples to add to the sample set. Should either be a single
sample or should match the length of the sample set. See
:func:`.as_samples` for what is allowed to be `samples_like`.
sort_labels (bool, optional, default=True):
If true, returned :attr:`.SampleSet.variables` will be in
sorted-order. Note that mixed types are not sortable in which
case the given order will be maintained.
Returns:
:obj:`.SampleSet`: A new sample set with the variables/values added.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([{'a': -1, 'b': +1},
... {'a': +1, 'b': +1}],
... dimod.SPIN,
... energy=[-1.0, 1.0])
>>> new = sampleset.append_variables({'c': -1})
>>> print(new)
a b c energy num_oc.
0 -1 +1 -1 -1.0 1
1 +1 +1 -1 1.0 1
['SPIN', 2 rows, 2 samples, 3 variables]
Add variables from another sampleset to the original above. Note
that the energies do not change.
>>> another = dimod.SampleSet.from_samples([{'c': -1, 'd': +1},
... {'c': +1, 'd': +1}],
... dimod.SPIN,
... energy=[-2.0, 1.0])
>>> new = sampleset.append_variables(another)
>>> print(new)
a b c d energy num_oc.
0 -1 +1 -1 +1 -1.0 1
1 +1 +1 +1 +1 1.0 1
['SPIN', 2 rows, 2 samples, 4 variables] | [
"Create",
"a",
"new",
"sampleset",
"with",
"the",
"given",
"variables",
"with",
"values",
"added",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/sampleset.py#L1011-L1088 | train | 212,644 |
dwavesystems/dimod | dimod/sampleset.py | SampleSet.lowest | def lowest(self, rtol=1.e-5, atol=1.e-8):
"""Return a sample set containing the lowest-energy samples.
A sample is included if its energy is within tolerance of the lowest
energy in the sample set. The following equation is used to determine
if two values are equivalent:
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
See :func:`numpy.isclose` for additional details and caveats.
Args:
rtol (float, optional, default=1.e-5):
The relative tolerance (see above).
atol (float, optional, default=1.e-8):
The absolute tolerance (see above).
Returns:
:obj:`.SampleSet`: A new sample set containing the lowest energy
samples as delimited by configured tolerances from the lowest energy
sample in the current sample set.
Examples:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': .001},
... {('a', 'b'): -1})
>>> print(sampleset.lowest())
a b energy num_oc.
0 -1 -1 -1.001 1
['SPIN', 1 rows, 1 samples, 2 variables]
>>> print(sampleset.lowest(atol=.1))
a b energy num_oc.
0 -1 -1 -1.001 1
1 +1 +1 -0.999 1
['SPIN', 2 rows, 2 samples, 2 variables]
Note:
"Lowest energy" is the lowest energy in the sample set. This is not
always the "ground energy" which is the lowest energy possible
for a binary quadratic model.
"""
if len(self) == 0:
# empty so all are lowest
return self.copy()
record = self.record
# want all the rows within tolerance of the minimal energy
close = np.isclose(record.energy,
np.min(record.energy),
rtol=rtol, atol=atol)
record = record[close]
return type(self)(record, self.variables, copy.deepcopy(self.info),
def lowest(self, rtol=1.e-5, atol=1.e-8):
    """Return a sample set containing only the lowest-energy samples.

    A sample is kept when its energy is within tolerance of the minimum
    energy present in this sample set, using the :func:`numpy.isclose`
    criterion ``absolute(a - b) <= (atol + rtol * absolute(b))``.

    Args:
        rtol (float, optional, default=1.e-5):
            Relative tolerance for the energy comparison.
        atol (float, optional, default=1.e-8):
            Absolute tolerance for the energy comparison.

    Returns:
        :obj:`.SampleSet`: A new sample set holding the samples whose
        energy is within tolerance of the lowest energy found here.

    Note:
        "Lowest energy" means the lowest energy in this sample set, which
        is not necessarily the ground energy of the underlying binary
        quadratic model.
    """
    if not len(self):
        # nothing to filter; every (zero) sample qualifies
        return self.copy()

    rec = self.record
    min_energy = np.min(rec.energy)

    # boolean mask of rows whose energy is within tolerance of the minimum
    keep = np.isclose(rec.energy, min_energy, rtol=rtol, atol=atol)

    return type(self)(rec[keep], self.variables,
                      copy.deepcopy(self.info), self.vartype)
"def",
"lowest",
"(",
"self",
",",
"rtol",
"=",
"1.e-5",
",",
"atol",
"=",
"1.e-8",
")",
":",
"if",
"len",
"(",
"self",
")",
"==",
"0",
":",
"# empty so all are lowest",
"return",
"self",
".",
"copy",
"(",
")",
"record",
"=",
"self",
".",
"record",
... | Return a sample set containing the lowest-energy samples.
A sample is included if its energy is within tolerance of the lowest
energy in the sample set. The following equation is used to determine
if two values are equivalent:
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
See :func:`numpy.isclose` for additional details and caveats.
Args:
rtol (float, optional, default=1.e-5):
The relative tolerance (see above).
atol (float, optional, default=1.e-8):
The absolute tolerance (see above).
Returns:
:obj:`.SampleSet`: A new sample set containing the lowest energy
samples as delimited by configured tolerances from the lowest energy
sample in the current sample set.
Examples:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': .001},
... {('a', 'b'): -1})
>>> print(sampleset.lowest())
a b energy num_oc.
0 -1 -1 -1.001 1
['SPIN', 1 rows, 1 samples, 2 variables]
>>> print(sampleset.lowest(atol=.1))
a b energy num_oc.
0 -1 -1 -1.001 1
1 +1 +1 -0.999 1
['SPIN', 2 rows, 2 samples, 2 variables]
Note:
"Lowest energy" is the lowest energy in the sample set. This is not
always the "ground energy" which is the lowest energy possible
for a binary quadratic model. | [
"Return",
"a",
"sample",
"set",
"containing",
"the",
"lowest",
"-",
"energy",
"samples",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/sampleset.py#L1090-L1146 | train | 212,645 |
def slice(self, *slice_args, **kwargs):
    """Create a new SampleSet with rows selected by Python slice syntax.

    Args:
        start (int, optional, default=None):
            Start index for the slice.
        stop (int):
            Stop index for the slice.
        step (int, optional, default=None):
            Step value for the slice.
        sorted_by (str/None, optional, default='energy'):
            Record field used to order the samples before slicing; this
            ordering is also the sample order of the returned SampleSet.
            If None, the current record order is used.

    Returns:
        :obj:`.SampleSet`
    """
    # emulate a keyword-only argument in a python2-compatible way
    sorted_by = kwargs.pop('sorted_by', 'energy')
    if kwargs:
        # match python3's error for unknown keyword arguments
        raise TypeError('slice got an unexpected '
                        'keyword argument {!r}'.format(kwargs.popitem()[0]))

    # build a selector following Python's built-in slice semantics
    selector = slice(*slice_args) if slice_args else slice(None)

    if sorted_by is None:
        subrecord = self.record[selector]
    else:
        order = np.argsort(self.record[sorted_by])
        subrecord = self.record[order[selector]]

    return type(self)(subrecord, self.variables,
                      copy.deepcopy(self.info), self.vartype)
"""Create a new SampleSet with rows sliced according to standard Python
slicing syntax.
Args:
start (int, optional, default=None):
Start index for `slice`.
stop (int):
Stop index for `slice`.
step (int, optional, default=None):
Step value for `slice`.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples before
slicing. Note that `sorted_by` determines the sample order in
the returned SampleSet.
Returns:
:obj:`.SampleSet`
Examples:
>>> import numpy as np
...
>>> sampleset = dimod.SampleSet.from_samples(np.diag(range(1, 11)), dimod.BINARY, energy=range(10))
>>> print(sampleset)
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 1 0 0 0 0 0 0 0 0 0 0 1
1 0 1 0 0 0 0 0 0 0 0 1 1
2 0 0 1 0 0 0 0 0 0 0 2 1
3 0 0 0 1 0 0 0 0 0 0 3 1
4 0 0 0 0 1 0 0 0 0 0 4 1
5 0 0 0 0 0 1 0 0 0 0 5 1
6 0 0 0 0 0 0 1 0 0 0 6 1
7 0 0 0 0 0 0 0 1 0 0 7 1
8 0 0 0 0 0 0 0 0 1 0 8 1
9 0 0 0 0 0 0 0 0 0 1 9 1
['BINARY', 10 rows, 10 samples, 10 variables]
>>> # the first 3 samples by energy == truncate(3)
>>> print(sampleset.slice(3))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 1 0 0 0 0 0 0 0 0 0 0 1
1 0 1 0 0 0 0 0 0 0 0 1 1
2 0 0 1 0 0 0 0 0 0 0 2 1
['BINARY', 3 rows, 3 samples, 10 variables]
>>> # the last 3 samples by energy
>>> print(sampleset.slice(-3, None))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 0 0 0 0 0 0 0 1 0 0 7 1
1 0 0 0 0 0 0 0 0 1 0 8 1
2 0 0 0 0 0 0 0 0 0 1 9 1
['BINARY', 3 rows, 3 samples, 10 variables]
>>> # every second sample in between (skip the top and the bottom 3)
>>> print(sampleset.slice(3, -3, 2))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 0 0 0 1 0 0 0 0 0 0 3 1
1 0 0 0 0 0 1 0 0 0 0 5 1
['BINARY', 2 rows, 2 samples, 10 variables]
"""
# handle `sorted_by` kwarg with a default value in a python2-compatible way
sorted_by = kwargs.pop('sorted_by', 'energy')
if kwargs:
# be strict about allowed kwargs: throw the same error as python3 would
raise TypeError('slice got an unexpected '
'keyword argument {!r}'.format(kwargs.popitem()[0]))
# follow Python's slice syntax
if slice_args:
selector = slice(*slice_args)
else:
selector = slice(None)
if sorted_by is None:
record = self.record[selector]
else:
sort_indices = np.argsort(self.record[sorted_by])
record = self.record[sort_indices[selector]]
return type(self)(record, self.variables, copy.deepcopy(self.info),
self.vartype) | [
"def",
"slice",
"(",
"self",
",",
"*",
"slice_args",
",",
"*",
"*",
"kwargs",
")",
":",
"# handle `sorted_by` kwarg with a default value in a python2-compatible way",
"sorted_by",
"=",
"kwargs",
".",
"pop",
"(",
"'sorted_by'",
",",
"'energy'",
")",
"if",
"kwargs",
... | Create a new SampleSet with rows sliced according to standard Python
slicing syntax.
Args:
start (int, optional, default=None):
Start index for `slice`.
stop (int):
Stop index for `slice`.
step (int, optional, default=None):
Step value for `slice`.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples before
slicing. Note that `sorted_by` determines the sample order in
the returned SampleSet.
Returns:
:obj:`.SampleSet`
Examples:
>>> import numpy as np
...
>>> sampleset = dimod.SampleSet.from_samples(np.diag(range(1, 11)), dimod.BINARY, energy=range(10))
>>> print(sampleset)
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 1 0 0 0 0 0 0 0 0 0 0 1
1 0 1 0 0 0 0 0 0 0 0 1 1
2 0 0 1 0 0 0 0 0 0 0 2 1
3 0 0 0 1 0 0 0 0 0 0 3 1
4 0 0 0 0 1 0 0 0 0 0 4 1
5 0 0 0 0 0 1 0 0 0 0 5 1
6 0 0 0 0 0 0 1 0 0 0 6 1
7 0 0 0 0 0 0 0 1 0 0 7 1
8 0 0 0 0 0 0 0 0 1 0 8 1
9 0 0 0 0 0 0 0 0 0 1 9 1
['BINARY', 10 rows, 10 samples, 10 variables]
>>> # the first 3 samples by energy == truncate(3)
>>> print(sampleset.slice(3))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 1 0 0 0 0 0 0 0 0 0 0 1
1 0 1 0 0 0 0 0 0 0 0 1 1
2 0 0 1 0 0 0 0 0 0 0 2 1
['BINARY', 3 rows, 3 samples, 10 variables]
>>> # the last 3 samples by energy
>>> print(sampleset.slice(-3, None))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 0 0 0 0 0 0 0 1 0 0 7 1
1 0 0 0 0 0 0 0 0 1 0 8 1
2 0 0 0 0 0 0 0 0 0 1 9 1
['BINARY', 3 rows, 3 samples, 10 variables]
>>> # every second sample in between (skip the top and the bottom 3)
>>> print(sampleset.slice(3, -3, 2))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 0 0 0 1 0 0 0 0 0 0 3 1
1 0 0 0 0 0 1 0 0 0 0 5 1
['BINARY', 2 rows, 2 samples, 10 variables] | [
"Create",
"a",
"new",
"SampleSet",
"with",
"rows",
"sliced",
"according",
"to",
"standard",
"Python",
"slicing",
"syntax",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/sampleset.py#L1188-L1273 | train | 212,646 |
def to_pandas_dataframe(self, sample_column=False):
    """Convert this SampleSet to a :obj:`pandas.DataFrame`.

    Args:
        sample_column (bool, optional, default=False):
            If True, samples are stored as dicts in a single 'sample'
            column; otherwise each variable gets its own column.

    Returns:
        :obj:`pandas.DataFrame`
    """
    import pandas as pd

    if sample_column:
        return pd.DataFrame(self.data(sorted_by=None, sample_dict_cast=True))

    # operating on the record directly is much faster than iterating data()
    frame = pd.DataFrame(self.record.sample, columns=self.variables)
    for name in sorted(self.record.dtype.fields):  # sorted for consistency
        if name == 'sample':
            continue
        frame.loc[:, name] = self.record[name]
    return frame
"""Convert a SampleSet to a Pandas DataFrame
Returns:
:obj:`pandas.DataFrame`
Examples:
>>> samples = dimod.SampleSet.from_samples([{'a': -1, 'b': +1, 'c': -1},
... {'a': -1, 'b': -1, 'c': +1}],
... dimod.SPIN, energy=-.5)
>>> samples.to_pandas_dataframe() # doctest: +SKIP
a b c energy num_occurrences
0 -1 1 -1 -0.5 1
1 -1 -1 1 -0.5 1
>>> samples.to_pandas_dataframe(sample_column=True) # doctest: +SKIP
sample energy num_occurrences
0 {'a': -1, 'b': 1, 'c': -1} -0.5 1
1 {'a': -1, 'b': -1, 'c': 1} -0.5 1
"""
import pandas as pd
if sample_column:
df = pd.DataFrame(self.data(sorted_by=None, sample_dict_cast=True))
else:
# work directly with the record, it's much faster
df = pd.DataFrame(self.record.sample, columns=self.variables)
for field in sorted(self.record.dtype.fields): # sort for consistency
if field == 'sample':
continue
df.loc[:, field] = self.record[field]
return df | [
"def",
"to_pandas_dataframe",
"(",
"self",
",",
"sample_column",
"=",
"False",
")",
":",
"import",
"pandas",
"as",
"pd",
"if",
"sample_column",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"self",
".",
"data",
"(",
"sorted_by",
"=",
"None",
",",
"sample_... | Convert a SampleSet to a Pandas DataFrame
Returns:
:obj:`pandas.DataFrame`
Examples:
>>> samples = dimod.SampleSet.from_samples([{'a': -1, 'b': +1, 'c': -1},
... {'a': -1, 'b': -1, 'c': +1}],
... dimod.SPIN, energy=-.5)
>>> samples.to_pandas_dataframe() # doctest: +SKIP
a b c energy num_occurrences
0 -1 1 -1 -0.5 1
1 -1 -1 1 -0.5 1
>>> samples.to_pandas_dataframe(sample_column=True) # doctest: +SKIP
sample energy num_occurrences
0 {'a': -1, 'b': 1, 'c': -1} -0.5 1
1 {'a': -1, 'b': -1, 'c': 1} -0.5 1 | [
"Convert",
"a",
"SampleSet",
"to",
"a",
"Pandas",
"DataFrame"
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/sampleset.py#L1407-L1442 | train | 212,647 |
def penalty_satisfaction(response, bqm):
    """Create a penalty-satisfaction vector.

    Given a sample set and a bqm carrying reduction info, build a binary
    array indicating, per sample, whether every penalty introduced during
    degree reduction (each product variable equals the product of its two
    factors) is satisfied.

    Args:
        response (:obj:`.SampleSet`): Samples corresponding to provided bqm.
        bqm (:obj:`.BinaryQuadraticModel`): A bqm object that contains
            its reduction info in ``bqm.info['reduction']``.

    Returns:
        :obj:`numpy.ndarray`: A binary array of penalty satisfaction
        information, one entry per sample.
    """
    record = response.record
    label_dict = response.variables.index

    reductions = bqm.info['reduction']
    if not reductions:
        # no product variables were introduced; every sample is valid
        return np.array([1] * len(record.sample))

    # one boolean row per reduction: product column == qi column * qj column
    checks = []
    for (qi, qj), valdict in reductions.items():
        product_column = record.sample[:, label_dict[valdict['product']]]
        checks.append(record.sample[:, label_dict[qi]] *
                      record.sample[:, label_dict[qj]] == product_column)

    # a sample satisfies the penalties only if every check holds
    return np.prod(checks, axis=0)
""" Creates a penalty satisfaction list
Given a sampleSet and a bqm object, will create a binary list informing
whether the penalties introduced during degree reduction are satisfied for
each sample in sampleSet
Args:
response (:obj:`.SampleSet`): Samples corresponding to provided bqm
bqm (:obj:`.BinaryQuadraticModel`): a bqm object that contains
its reduction info.
Returns:
:obj:`numpy.ndarray`: a binary array of penalty satisfaction information
"""
record = response.record
label_dict = response.variables.index
if len(bqm.info['reduction']) == 0:
return np.array([1] * len(record.sample))
penalty_vector = np.prod([record.sample[:, label_dict[qi]] *
record.sample[:, label_dict[qj]]
== record.sample[:,
label_dict[valdict['product']]]
for (qi, qj), valdict in
bqm.info['reduction'].items()], axis=0)
return penalty_vector | [
"def",
"penalty_satisfaction",
"(",
"response",
",",
"bqm",
")",
":",
"record",
"=",
"response",
".",
"record",
"label_dict",
"=",
"response",
".",
"variables",
".",
"index",
"if",
"len",
"(",
"bqm",
".",
"info",
"[",
"'reduction'",
"]",
")",
"==",
"0",
... | Creates a penalty satisfaction list
Given a sampleSet and a bqm object, will create a binary list informing
whether the penalties introduced during degree reduction are satisfied for
each sample in sampleSet
Args:
response (:obj:`.SampleSet`): Samples corresponding to provided bqm
bqm (:obj:`.BinaryQuadraticModel`): a bqm object that contains
its reduction info.
Returns:
:obj:`numpy.ndarray`: a binary array of penalty satisfaction information | [
"Creates",
"a",
"penalty",
"satisfaction",
"list"
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/reference/composites/higherordercomposites.py#L132-L161 | train | 212,648 |
def polymorph_response(response, poly, bqm,
                       penalty_strength=None,
                       keep_penalty_variables=True,
                       discard_unsatisfied=False):
    """Transform the sample set of a penalized HUBO back to the higher-order problem.

    Given a response for the reduced (quadratic) problem, builds a new
    sample set that records penalty-satisfaction information and whose
    energies are recomputed for the original higher-order polynomial.

    Args:
        response (:obj:`.SampleSet`): Response for a penalized hubo.
        poly (:obj:`.BinaryPolynomial`):
            A binary polynomial.
        bqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model of the
            reduced problem (carries reduction info in ``bqm.info``).
        penalty_strength (float, optional): Default is None; if provided,
            it is added to the info field of the returned sample set.
        keep_penalty_variables (bool, optional): Default is True. If False,
            the variables used for penalties are removed from the samples.
        discard_unsatisfied (bool, optional): Default is False. If True,
            samples that do not satisfy the penalty conditions are discarded.

    Returns:
        :obj:`.SampleSet`: A sample set with an extra
        'penalty_satisfaction' field; sample energies are those of the
        HUBO, ignoring the penalty variables.
    """
    record = response.record
    # per-sample flags: does each sample satisfy all reduction penalties?
    penalty_vector = penalty_satisfaction(response, bqm)
    original_variables = bqm.variables

    if discard_unsatisfied:
        # keep only penalty-satisfying rows; the surviving rows all satisfy
        samples_to_keep = list(map(bool, list(penalty_vector)))
        penalty_vector = np.array([True] * np.sum(samples_to_keep))
    else:
        # keep every row
        samples_to_keep = list(map(bool, [1] * len(record.sample)))

    samples = record.sample[samples_to_keep]
    # recompute energies under the original higher-order polynomial
    energy_vector = poly.energies((samples, response.variables))

    if not keep_penalty_variables:
        # project the samples down to the polynomial's own variables,
        # dropping the auxiliary product-variable columns
        original_variables = poly.variables
        idxs = [response.variables.index[v] for v in original_variables]
        samples = np.asarray(samples[:, idxs])

    num_samples, num_variables = np.shape(samples)

    # rebuild the record dtype: sample/energy/penalty fields first, then
    # carry over any remaining fields from the child response unchanged
    datatypes = [('sample', np.dtype(np.int8), (num_variables,)),
                 ('energy', energy_vector.dtype),
                 ('penalty_satisfaction',
                  penalty_vector.dtype)]

    datatypes.extend((name, record[name].dtype, record[name].shape[1:])
                     for name in record.dtype.names if
                     name not in {'sample',
                                  'energy'})

    data = np.rec.array(np.empty(num_samples, dtype=datatypes))
    data.sample = samples
    data.energy = energy_vector
    for name in record.dtype.names:
        if name not in {'sample', 'energy'}:
            # filter carried-over fields by the same row mask
            data[name] = record[name][samples_to_keep]

    data['penalty_satisfaction'] = penalty_vector
    # expose the reduction info (and optionally the penalty strength) on
    # the returned sample set
    response.info['reduction'] = bqm.info['reduction']
    if penalty_strength is not None:
        response.info['penalty_strength'] = penalty_strength
    return SampleSet(data, original_variables, response.info,
                     response.vartype)
penalty_strength=None,
keep_penalty_variables=True,
discard_unsatisfied=False):
""" Transforms the sampleset for the higher order problem.
Given a response of a penalized HUBO, this function creates a new sampleset
object, taking into account penalty information and calculates the
energies of samples for the higherorder problem.
Args:
response (:obj:`.SampleSet`): response for a penalized hubo.
poly (:obj:`.BinaryPolynomial`):
A binary polynomial.
bqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model of the
reduced problem.
penalty_strength (float, optional): default is None, if provided,
will be added to the info field of the returned sampleSet object.
keep_penalty_variables (bool, optional): default is True. if False
will remove the variables used for penalty from the samples
discard_unsatisfied (bool, optional): default is False. If True
will discard samples that do not satisfy the penalty conditions.
Returns:
(:obj:`.SampleSet'): A sampleSet object that has additional penalty
information. The energies of samples are calculated for the HUBO
ignoring the penalty variables.
"""
record = response.record
penalty_vector = penalty_satisfaction(response, bqm)
original_variables = bqm.variables
if discard_unsatisfied:
samples_to_keep = list(map(bool, list(penalty_vector)))
penalty_vector = np.array([True] * np.sum(samples_to_keep))
else:
samples_to_keep = list(map(bool, [1] * len(record.sample)))
samples = record.sample[samples_to_keep]
energy_vector = poly.energies((samples, response.variables))
if not keep_penalty_variables:
original_variables = poly.variables
idxs = [response.variables.index[v] for v in original_variables]
samples = np.asarray(samples[:, idxs])
num_samples, num_variables = np.shape(samples)
datatypes = [('sample', np.dtype(np.int8), (num_variables,)),
('energy', energy_vector.dtype),
('penalty_satisfaction',
penalty_vector.dtype)]
datatypes.extend((name, record[name].dtype, record[name].shape[1:])
for name in record.dtype.names if
name not in {'sample',
'energy'})
data = np.rec.array(np.empty(num_samples, dtype=datatypes))
data.sample = samples
data.energy = energy_vector
for name in record.dtype.names:
if name not in {'sample', 'energy'}:
data[name] = record[name][samples_to_keep]
data['penalty_satisfaction'] = penalty_vector
response.info['reduction'] = bqm.info['reduction']
if penalty_strength is not None:
response.info['penalty_strength'] = penalty_strength
return SampleSet(data, original_variables, response.info,
response.vartype) | [
"def",
"polymorph_response",
"(",
"response",
",",
"poly",
",",
"bqm",
",",
"penalty_strength",
"=",
"None",
",",
"keep_penalty_variables",
"=",
"True",
",",
"discard_unsatisfied",
"=",
"False",
")",
":",
"record",
"=",
"response",
".",
"record",
"penalty_vector... | Transforms the sampleset for the higher order problem.
Given a response of a penalized HUBO, this function creates a new sampleset
object, taking into account penalty information and calculates the
energies of samples for the higherorder problem.
Args:
response (:obj:`.SampleSet`): response for a penalized hubo.
poly (:obj:`.BinaryPolynomial`):
A binary polynomial.
bqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model of the
reduced problem.
penalty_strength (float, optional): default is None, if provided,
will be added to the info field of the returned sampleSet object.
keep_penalty_variables (bool, optional): default is True. if False
will remove the variables used for penalty from the samples
discard_unsatisfied (bool, optional): default is False. If True
will discard samples that do not satisfy the penalty conditions.
Returns:
(:obj:`.SampleSet'): A sampleSet object that has additional penalty
information. The energies of samples are calculated for the HUBO
ignoring the penalty variables. | [
"Transforms",
"the",
"sampleset",
"for",
"the",
"higher",
"order",
"problem",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/reference/composites/higherordercomposites.py#L164-L239 | train | 212,649 |
def sample_poly(self, poly, penalty_strength=1.0,
                keep_penalty_variables=False,
                discard_unsatisfied=False, **parameters):
    """Sample from the given binary polynomial.

    Takes the given binary polynomial, introduces penalties, reduces the
    higher-order problem into a quadratic problem and sends it to its child
    sampler. The child's response is then mapped back to the original
    higher-order problem.

    Args:
        poly (:obj:`.BinaryPolynomial`):
            A binary polynomial.
        penalty_strength (float, optional, default=1.0):
            Strength of the reduction constraint. Insufficient strength
            can result in the binary quadratic model not having the same
            minimization as the polynomial.
        keep_penalty_variables (bool, optional, default=False):
            If False, removes the variables used for penalty from the
            samples.
        discard_unsatisfied (bool, optional, default=False):
            If True, discards samples that do not satisfy the penalty
            conditions.
        **parameters: Parameters for the sampling method, specified by
            the child sampler.

    Returns:
        :obj:`dimod.SampleSet`
    """
    # reduce the higher-order polynomial to a bqm with penalized
    # product variables
    bqm = make_quadratic(poly, penalty_strength, vartype=poly.vartype)
    response = self.child.sample(bqm, **parameters)

    # map the quadratic response back to the higher-order problem
    return polymorph_response(response, poly, bqm,
                              penalty_strength=penalty_strength,
                              keep_penalty_variables=keep_penalty_variables,
                              discard_unsatisfied=discard_unsatisfied)
keep_penalty_variables=False,
discard_unsatisfied=False, **parameters):
"""Sample from the given binary polynomial.
Takes the given binary polynomial, introduces penalties, reduces the
higher-order problem into a quadratic problem and sends it to its child
sampler.
Args:
poly (:obj:`.BinaryPolynomial`):
A binary polynomial.
penalty_strength (float, optional): Strength of the reduction constraint.
Insufficient strength can result in the binary quadratic model
not having the same minimization as the polynomial.
keep_penalty_variables (bool, optional): default is True. if False
will remove the variables used for penalty from the samples
discard_unsatisfied (bool, optional): default is False. If True
will discard samples that do not satisfy the penalty conditions.
**parameters: Parameters for the sampling method, specified by
the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
bqm = make_quadratic(poly, penalty_strength, vartype=poly.vartype)
response = self.child.sample(bqm, **parameters)
return polymorph_response(response, poly, bqm,
penalty_strength=penalty_strength,
keep_penalty_variables=keep_penalty_variables,
discard_unsatisfied=discard_unsatisfied) | [
"def",
"sample_poly",
"(",
"self",
",",
"poly",
",",
"penalty_strength",
"=",
"1.0",
",",
"keep_penalty_variables",
"=",
"False",
",",
"discard_unsatisfied",
"=",
"False",
",",
"*",
"*",
"parameters",
")",
":",
"bqm",
"=",
"make_quadratic",
"(",
"poly",
",",... | Sample from the given binary polynomial.
Takes the given binary polynomial, introduces penalties, reduces the
higher-order problem into a quadratic problem and sends it to its child
sampler.
Args:
poly (:obj:`.BinaryPolynomial`):
A binary polynomial.
penalty_strength (float, optional): Strength of the reduction constraint.
Insufficient strength can result in the binary quadratic model
not having the same minimization as the polynomial.
keep_penalty_variables (bool, optional): default is True. if False
will remove the variables used for penalty from the samples
discard_unsatisfied (bool, optional): default is False. If True
will discard samples that do not satisfy the penalty conditions.
**parameters: Parameters for the sampling method, specified by
the child sampler.
Returns:
:obj:`dimod.SampleSet` | [
"Sample",
"from",
"the",
"given",
"binary",
"polynomial",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/reference/composites/higherordercomposites.py#L92-L129 | train | 212,650 |
def sample_poly(self, poly, scalar=None, bias_range=1, poly_range=None,
                ignored_terms=None, **parameters):
    """Scale and sample from the given binary polynomial.

    If ``scalar`` is not given, the problem is scaled based on bias and
    polynomial ranges. See :meth:`.BinaryPolynomial.scale` and
    :meth:`.BinaryPolynomial.normalize`.

    Args:
        poly (obj:`.BinaryPolynomial`): A binary polynomial.
        scalar (number, optional):
            Value by which to scale the energy range of the binary polynomial.
        bias_range (number/pair, optional, default=1):
            Value/range by which to normalize the all the biases, or if
            `poly_range` is provided, just the linear biases.
        poly_range (number/pair, optional):
            Value/range by which to normalize the higher order biases.
        ignored_terms (iterable, optional):
            Biases associated with these terms are not scaled.
        **parameters:
            Other parameters for the sampling method, specified by
            the child sampler.
    """
    if ignored_terms is None:
        ignored_terms = set()
    else:
        ignored_terms = {frozenset(term) for term in ignored_terms}

    # scale/normalize mutate in-place; keep the original around so we can
    # undo the scaling on the returned energies
    original, poly = poly, poly.copy()

    if scalar is not None:
        poly.scale(scalar, ignored_terms=ignored_terms)
    else:
        poly.normalize(bias_range=bias_range, poly_range=poly_range,
                       ignored_terms=ignored_terms)

    # recover the effective scaling factor by comparing the first nonzero,
    # non-ignored bias before and after scaling
    scalar = 1  # nothing to scale unless we find such a bias
    for term, bias in original.items():
        if bias and term not in ignored_terms:
            scalar = poly[term] / bias
            break

    sampleset = self.child.sample_poly(poly, **parameters)

    if ignored_terms:
        # ignored biases were left unscaled, so simple division is wrong;
        # recompute the energies from the original polynomial instead
        sampleset.record.energy = original.energies(
            (sampleset.record.sample, sampleset.variables))
    else:
        sampleset.record.energy /= scalar

    return sampleset
ignored_terms=None, **parameters):
"""Scale and sample from the given binary polynomial.
If scalar is not given, problem is scaled based on bias and polynomial
ranges. See :meth:`.BinaryPolynomial.scale` and
:meth:`.BinaryPolynomial.normalize`
Args:
poly (obj:`.BinaryPolynomial`): A binary polynomial.
scalar (number, optional):
Value by which to scale the energy range of the binary polynomial.
bias_range (number/pair, optional, default=1):
Value/range by which to normalize the all the biases, or if
`poly_range` is provided, just the linear biases.
poly_range (number/pair, optional):
Value/range by which to normalize the higher order biases.
ignored_terms (iterable, optional):
Biases associated with these terms are not scaled.
**parameters:
Other parameters for the sampling method, specified by
the child sampler.
"""
if ignored_terms is None:
ignored_terms = set()
else:
ignored_terms = {frozenset(term) for term in ignored_terms}
# scale and normalize happen in-place so we need to make a copy
original, poly = poly, poly.copy()
if scalar is not None:
poly.scale(scalar, ignored_terms=ignored_terms)
else:
poly.normalize(bias_range=bias_range, poly_range=poly_range,
ignored_terms=ignored_terms)
# we need to know how much we scaled by, which we can do by looking
# at the biases
try:
v = next(v for v, bias in original.items()
if bias and v not in ignored_terms)
except StopIteration:
# nothing to scale
scalar = 1
else:
scalar = poly[v] / original[v]
sampleset = self.child.sample_poly(poly, **parameters)
if ignored_terms:
# we need to recalculate the energy
sampleset.record.energy = original.energies((sampleset.record.sample,
sampleset.variables))
else:
sampleset.record.energy /= scalar
return sampleset | [
"def",
"sample_poly",
"(",
"self",
",",
"poly",
",",
"scalar",
"=",
"None",
",",
"bias_range",
"=",
"1",
",",
"poly_range",
"=",
"None",
",",
"ignored_terms",
"=",
"None",
",",
"*",
"*",
"parameters",
")",
":",
"if",
"ignored_terms",
"is",
"None",
":",... | Scale and sample from the given binary polynomial.
If scalar is not given, problem is scaled based on bias and polynomial
ranges. See :meth:`.BinaryPolynomial.scale` and
:meth:`.BinaryPolynomial.normalize`
Args:
poly (obj:`.BinaryPolynomial`): A binary polynomial.
scalar (number, optional):
Value by which to scale the energy range of the binary polynomial.
bias_range (number/pair, optional, default=1):
Value/range by which to normalize the all the biases, or if
`poly_range` is provided, just the linear biases.
poly_range (number/pair, optional):
Value/range by which to normalize the higher order biases.
ignored_terms (iterable, optional):
Biases associated with these terms are not scaled.
**parameters:
Other parameters for the sampling method, specified by
the child sampler. | [
"Scale",
"and",
"sample",
"from",
"the",
"given",
"binary",
"polynomial",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/reference/composites/higherordercomposites.py#L283-L347 | train | 212,651 |
def sample_poly(self, poly, **kwargs):
    """Sample from the binary polynomial and truncate the output.

    Args:
        poly (obj:`.BinaryPolynomial`): A binary polynomial.
        **kwargs:
            Parameters for the sampling method, specified by the child
            sampler.

    Returns:
        :obj:`dimod.SampleSet`
    """
    sampleset = self.child.sample_poly(poly, **kwargs)
    if self._aggregate:
        # merge duplicate samples before truncating
        sampleset = sampleset.aggregate()
    return sampleset.truncate(**self._truncate_kwargs)
"""Sample from the binary polynomial and truncate output.
Args:
poly (obj:`.BinaryPolynomial`): A binary polynomial.
**kwargs:
Parameters for the sampling method, specified by the child
sampler.
Returns:
:obj:`dimod.SampleSet`
"""
tkw = self._truncate_kwargs
if self._aggregate:
return self.child.sample_poly(poly, **kwargs).aggregate().truncate(**tkw)
else:
return self.child.sample_poly(poly, **kwargs).truncate(**tkw) | [
"def",
"sample_poly",
"(",
"self",
",",
"poly",
",",
"*",
"*",
"kwargs",
")",
":",
"tkw",
"=",
"self",
".",
"_truncate_kwargs",
"if",
"self",
".",
"_aggregate",
":",
"return",
"self",
".",
"child",
".",
"sample_poly",
"(",
"poly",
",",
"*",
"*",
"kwa... | Sample from the binary polynomial and truncate output.
Args:
poly (obj:`.BinaryPolynomial`): A binary polynomial.
**kwargs:
Parameters for the sampling method, specified by the child
sampler.
Returns:
:obj:`dimod.SampleSet` | [
"Sample",
"from",
"the",
"binary",
"polynomial",
"and",
"truncate",
"output",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/reference/composites/higherordercomposites.py#L398-L416 | train | 212,652 |
dwavesystems/dimod | dimod/response.py | _samples_dicts_to_array | def _samples_dicts_to_array(samples_dicts, labels):
"""Convert an iterable of samples where each sample is a dict to a numpy 2d array. Also
determines the labels is they are None.
"""
itersamples = iter(samples_dicts)
first_sample = next(itersamples)
if labels is None:
labels = list(first_sample)
num_variables = len(labels)
def _iter_samples():
yield np.fromiter((first_sample[v] for v in labels),
count=num_variables, dtype=np.int8)
try:
for sample in itersamples:
yield np.fromiter((sample[v] for v in labels),
count=num_variables, dtype=np.int8)
except KeyError:
msg = ("Each dict in 'samples' must have the same keys.")
raise ValueError(msg)
return np.stack(list(_iter_samples())), labels | python | def _samples_dicts_to_array(samples_dicts, labels):
"""Convert an iterable of samples where each sample is a dict to a numpy 2d array. Also
determines the labels is they are None.
"""
itersamples = iter(samples_dicts)
first_sample = next(itersamples)
if labels is None:
labels = list(first_sample)
num_variables = len(labels)
def _iter_samples():
yield np.fromiter((first_sample[v] for v in labels),
count=num_variables, dtype=np.int8)
try:
for sample in itersamples:
yield np.fromiter((sample[v] for v in labels),
count=num_variables, dtype=np.int8)
except KeyError:
msg = ("Each dict in 'samples' must have the same keys.")
raise ValueError(msg)
return np.stack(list(_iter_samples())), labels | [
"def",
"_samples_dicts_to_array",
"(",
"samples_dicts",
",",
"labels",
")",
":",
"itersamples",
"=",
"iter",
"(",
"samples_dicts",
")",
"first_sample",
"=",
"next",
"(",
"itersamples",
")",
"if",
"labels",
"is",
"None",
":",
"labels",
"=",
"list",
"(",
"firs... | Convert an iterable of samples where each sample is a dict to a numpy 2d array. Also
determines the labels is they are None. | [
"Convert",
"an",
"iterable",
"of",
"samples",
"where",
"each",
"sample",
"is",
"a",
"dict",
"to",
"a",
"numpy",
"2d",
"array",
".",
"Also",
"determines",
"the",
"labels",
"is",
"they",
"are",
"None",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/response.py#L183-L208 | train | 212,653 |
dwavesystems/dimod | dimod/response.py | data_struct_array | def data_struct_array(sample, **vectors): # data_struct_array(sample, *, energy, **vectors):
"""Combine samples and per-sample data into a numpy structured array.
Args:
sample (array_like):
Samples, in any form that can be converted into a numpy array.
energy (array_like, required):
Required keyword argument. Energies, in any form that can be converted into a numpy
1-dimensional array.
**kwargs (array_like):
Other per-sample data, in any form that can be converted into a numpy array.
Returns:
:obj:`~numpy.ndarray`: A numpy structured array. Has fields ['sample', 'energy', 'num_occurrences', **kwargs]
"""
if not len(sample):
# if samples are empty
sample = np.zeros((0, 0), dtype=np.int8)
else:
sample = np.asarray(sample, dtype=np.int8)
if sample.ndim < 2:
sample = np.expand_dims(sample, 0)
num_samples, num_variables = sample.shape
if 'num_occurrences' not in vectors:
vectors['num_occurrences'] = [1] * num_samples
datavectors = {}
datatypes = [('sample', np.dtype(np.int8), (num_variables,))]
for kwarg, vector in vectors.items():
dtype = float if kwarg == 'energy' else None
datavectors[kwarg] = vector = np.asarray(vector, dtype)
if len(vector.shape) < 1 or vector.shape[0] != num_samples:
msg = ('{} and sample have a mismatched shape {}, {}. They must have the same size '
'in the first axis.').format(kwarg, vector.shape, sample.shape)
raise ValueError(msg)
datatypes.append((kwarg, vector.dtype, vector.shape[1:]))
if 'energy' not in datavectors:
# consistent error with the one thrown in python3
raise TypeError('data_struct_array() needs keyword-only argument energy')
elif datavectors['energy'].shape != (num_samples,):
raise ValueError('energy should be a vector of length {}'.format(num_samples))
data = np.rec.array(np.zeros(num_samples, dtype=datatypes))
data['sample'] = sample
for kwarg, vector in datavectors.items():
data[kwarg] = vector
return data | python | def data_struct_array(sample, **vectors): # data_struct_array(sample, *, energy, **vectors):
"""Combine samples and per-sample data into a numpy structured array.
Args:
sample (array_like):
Samples, in any form that can be converted into a numpy array.
energy (array_like, required):
Required keyword argument. Energies, in any form that can be converted into a numpy
1-dimensional array.
**kwargs (array_like):
Other per-sample data, in any form that can be converted into a numpy array.
Returns:
:obj:`~numpy.ndarray`: A numpy structured array. Has fields ['sample', 'energy', 'num_occurrences', **kwargs]
"""
if not len(sample):
# if samples are empty
sample = np.zeros((0, 0), dtype=np.int8)
else:
sample = np.asarray(sample, dtype=np.int8)
if sample.ndim < 2:
sample = np.expand_dims(sample, 0)
num_samples, num_variables = sample.shape
if 'num_occurrences' not in vectors:
vectors['num_occurrences'] = [1] * num_samples
datavectors = {}
datatypes = [('sample', np.dtype(np.int8), (num_variables,))]
for kwarg, vector in vectors.items():
dtype = float if kwarg == 'energy' else None
datavectors[kwarg] = vector = np.asarray(vector, dtype)
if len(vector.shape) < 1 or vector.shape[0] != num_samples:
msg = ('{} and sample have a mismatched shape {}, {}. They must have the same size '
'in the first axis.').format(kwarg, vector.shape, sample.shape)
raise ValueError(msg)
datatypes.append((kwarg, vector.dtype, vector.shape[1:]))
if 'energy' not in datavectors:
# consistent error with the one thrown in python3
raise TypeError('data_struct_array() needs keyword-only argument energy')
elif datavectors['energy'].shape != (num_samples,):
raise ValueError('energy should be a vector of length {}'.format(num_samples))
data = np.rec.array(np.zeros(num_samples, dtype=datatypes))
data['sample'] = sample
for kwarg, vector in datavectors.items():
data[kwarg] = vector
return data | [
"def",
"data_struct_array",
"(",
"sample",
",",
"*",
"*",
"vectors",
")",
":",
"# data_struct_array(sample, *, energy, **vectors):",
"if",
"not",
"len",
"(",
"sample",
")",
":",
"# if samples are empty",
"sample",
"=",
"np",
".",
"zeros",
"(",
"(",
"0",
",",
"... | Combine samples and per-sample data into a numpy structured array.
Args:
sample (array_like):
Samples, in any form that can be converted into a numpy array.
energy (array_like, required):
Required keyword argument. Energies, in any form that can be converted into a numpy
1-dimensional array.
**kwargs (array_like):
Other per-sample data, in any form that can be converted into a numpy array.
Returns:
:obj:`~numpy.ndarray`: A numpy structured array. Has fields ['sample', 'energy', 'num_occurrences', **kwargs] | [
"Combine",
"samples",
"and",
"per",
"-",
"sample",
"data",
"into",
"a",
"numpy",
"structured",
"array",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/response.py#L211-L270 | train | 212,654 |
dwavesystems/dimod | dimod/response.py | Response.from_samples | def from_samples(cls, samples_like, vectors, info, vartype, variable_labels=None):
"""Build a response from samples.
Args:
samples_like:
A collection of samples. 'samples_like' is an extension of NumPy's array_like
to include an iterable of sample dictionaries (as returned by
:meth:`.Response.samples`).
data_vectors (dict[field, :obj:`numpy.array`/list]):
Additional per-sample data as a dict of vectors. Each vector is the
same length as `samples_matrix`. The key 'energy' and it's vector is required.
info (dict):
Information about the response as a whole formatted as a dict.
vartype (:class:`.Vartype`/str/set):
Variable type for the response. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
variable_labels (list, optional):
Determines the variable labels if samples_like is not an iterable of dictionaries.
If samples_like is not an iterable of dictionaries and if variable_labels is not
provided then index labels are used.
Returns:
:obj:`.Response`
Examples:
From dicts
>>> import dimod
...
>>> samples = [{'a': -1, 'b': +1}, {'a': -1, 'b': -1}]
>>> response = dimod.Response.from_samples(samples, {'energy': [-1, 0]}, {}, dimod.SPIN)
From an array
>>> import dimod
>>> import numpy as np
...
>>> samples = np.ones((2, 3), dtype='int8') # 2 samples, 3 variables
>>> response = dimod.Response.from_samples(samples, {'energy': [-1.0, -1.0]}, {},
... dimod.SPIN, variable_labels=['a', 'b', 'c'])
"""
# there is no np.is_array_like so we use a try-except block
try:
# trying to cast it to int8 rules out list of dictionaries. If we didn't try to cast
# then it would just create a vector of np.object
samples = np.asarray(samples_like, dtype=np.int8)
except TypeError:
# if labels are None, they are set here
samples, variable_labels = _samples_dicts_to_array(samples_like, variable_labels)
assert samples.dtype == np.int8, 'sanity check'
record = data_struct_array(samples, **vectors)
# if labels are still None, set them here. We could do this in an else in the try-except
# block, but the samples-array might not have the correct shape
if variable_labels is None:
__, num_variables = record.sample.shape
variable_labels = list(range(num_variables))
return cls(record, variable_labels, info, vartype) | python | def from_samples(cls, samples_like, vectors, info, vartype, variable_labels=None):
"""Build a response from samples.
Args:
samples_like:
A collection of samples. 'samples_like' is an extension of NumPy's array_like
to include an iterable of sample dictionaries (as returned by
:meth:`.Response.samples`).
data_vectors (dict[field, :obj:`numpy.array`/list]):
Additional per-sample data as a dict of vectors. Each vector is the
same length as `samples_matrix`. The key 'energy' and it's vector is required.
info (dict):
Information about the response as a whole formatted as a dict.
vartype (:class:`.Vartype`/str/set):
Variable type for the response. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
variable_labels (list, optional):
Determines the variable labels if samples_like is not an iterable of dictionaries.
If samples_like is not an iterable of dictionaries and if variable_labels is not
provided then index labels are used.
Returns:
:obj:`.Response`
Examples:
From dicts
>>> import dimod
...
>>> samples = [{'a': -1, 'b': +1}, {'a': -1, 'b': -1}]
>>> response = dimod.Response.from_samples(samples, {'energy': [-1, 0]}, {}, dimod.SPIN)
From an array
>>> import dimod
>>> import numpy as np
...
>>> samples = np.ones((2, 3), dtype='int8') # 2 samples, 3 variables
>>> response = dimod.Response.from_samples(samples, {'energy': [-1.0, -1.0]}, {},
... dimod.SPIN, variable_labels=['a', 'b', 'c'])
"""
# there is no np.is_array_like so we use a try-except block
try:
# trying to cast it to int8 rules out list of dictionaries. If we didn't try to cast
# then it would just create a vector of np.object
samples = np.asarray(samples_like, dtype=np.int8)
except TypeError:
# if labels are None, they are set here
samples, variable_labels = _samples_dicts_to_array(samples_like, variable_labels)
assert samples.dtype == np.int8, 'sanity check'
record = data_struct_array(samples, **vectors)
# if labels are still None, set them here. We could do this in an else in the try-except
# block, but the samples-array might not have the correct shape
if variable_labels is None:
__, num_variables = record.sample.shape
variable_labels = list(range(num_variables))
return cls(record, variable_labels, info, vartype) | [
"def",
"from_samples",
"(",
"cls",
",",
"samples_like",
",",
"vectors",
",",
"info",
",",
"vartype",
",",
"variable_labels",
"=",
"None",
")",
":",
"# there is no np.is_array_like so we use a try-except block",
"try",
":",
"# trying to cast it to int8 rules out list of dict... | Build a response from samples.
Args:
samples_like:
A collection of samples. 'samples_like' is an extension of NumPy's array_like
to include an iterable of sample dictionaries (as returned by
:meth:`.Response.samples`).
data_vectors (dict[field, :obj:`numpy.array`/list]):
Additional per-sample data as a dict of vectors. Each vector is the
same length as `samples_matrix`. The key 'energy' and it's vector is required.
info (dict):
Information about the response as a whole formatted as a dict.
vartype (:class:`.Vartype`/str/set):
Variable type for the response. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
variable_labels (list, optional):
Determines the variable labels if samples_like is not an iterable of dictionaries.
If samples_like is not an iterable of dictionaries and if variable_labels is not
provided then index labels are used.
Returns:
:obj:`.Response`
Examples:
From dicts
>>> import dimod
...
>>> samples = [{'a': -1, 'b': +1}, {'a': -1, 'b': -1}]
>>> response = dimod.Response.from_samples(samples, {'energy': [-1, 0]}, {}, dimod.SPIN)
From an array
>>> import dimod
>>> import numpy as np
...
>>> samples = np.ones((2, 3), dtype='int8') # 2 samples, 3 variables
>>> response = dimod.Response.from_samples(samples, {'energy': [-1.0, -1.0]}, {},
... dimod.SPIN, variable_labels=['a', 'b', 'c']) | [
"Build",
"a",
"response",
"from",
"samples",
"."
] | beff1b7f86b559d923ac653c1de6d593876d6d38 | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/response.py#L111-L180 | train | 212,655 |
svenevs/exhale | exhale/graph.py | ExhaleNode.breathe_identifier | def breathe_identifier(self):
"""
The unique identifier for breathe directives.
.. note::
This method is currently assumed to only be called for nodes that are
in :data:`exhale.utils.LEAF_LIKE_KINDS` (see also
:func:`exhale.graph.ExhaleRoot.generateSingleNodeRST` where it is used).
**Return**
:class:`python:str`
Usually, this will just be ``self.name``. However, for functions in
particular the signature must be included to distinguish overloads.
"""
if self.kind == "function":
# TODO: breathe bug with templates and overloads, don't know what to do...
return "{name}({parameters})".format(
name=self.name,
parameters=", ".join(self.parameters)
)
return self.name | python | def breathe_identifier(self):
"""
The unique identifier for breathe directives.
.. note::
This method is currently assumed to only be called for nodes that are
in :data:`exhale.utils.LEAF_LIKE_KINDS` (see also
:func:`exhale.graph.ExhaleRoot.generateSingleNodeRST` where it is used).
**Return**
:class:`python:str`
Usually, this will just be ``self.name``. However, for functions in
particular the signature must be included to distinguish overloads.
"""
if self.kind == "function":
# TODO: breathe bug with templates and overloads, don't know what to do...
return "{name}({parameters})".format(
name=self.name,
parameters=", ".join(self.parameters)
)
return self.name | [
"def",
"breathe_identifier",
"(",
"self",
")",
":",
"if",
"self",
".",
"kind",
"==",
"\"function\"",
":",
"# TODO: breathe bug with templates and overloads, don't know what to do...",
"return",
"\"{name}({parameters})\"",
".",
"format",
"(",
"name",
"=",
"self",
".",
"n... | The unique identifier for breathe directives.
.. note::
This method is currently assumed to only be called for nodes that are
in :data:`exhale.utils.LEAF_LIKE_KINDS` (see also
:func:`exhale.graph.ExhaleRoot.generateSingleNodeRST` where it is used).
**Return**
:class:`python:str`
Usually, this will just be ``self.name``. However, for functions in
particular the signature must be included to distinguish overloads. | [
"The",
"unique",
"identifier",
"for",
"breathe",
"directives",
"."
] | fe7644829057af622e467bb529db6c03a830da99 | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L231-L254 | train | 212,656 |
svenevs/exhale | exhale/graph.py | ExhaleNode.full_signature | def full_signature(self):
"""
The full signature of a ``"function"`` node.
**Return**
:class:`python:str`
The full signature of the function, including template, return type,
name, and parameter types.
**Raises**
:class:`python:RuntimeError`
If ``self.kind != "function"``.
"""
if self.kind == "function":
return "{template}{return_type} {name}({parameters})".format(
template="template <{0}> ".format(", ".join(self.template)) if self.template else "",
return_type=self.return_type,
name=self.name,
parameters=", ".join(self.parameters)
)
raise RuntimeError(
"full_signature may only be called for a 'function', but {name} is a '{kind}' node.".format(
name=self.name, kind=self.kind
)
) | python | def full_signature(self):
"""
The full signature of a ``"function"`` node.
**Return**
:class:`python:str`
The full signature of the function, including template, return type,
name, and parameter types.
**Raises**
:class:`python:RuntimeError`
If ``self.kind != "function"``.
"""
if self.kind == "function":
return "{template}{return_type} {name}({parameters})".format(
template="template <{0}> ".format(", ".join(self.template)) if self.template else "",
return_type=self.return_type,
name=self.name,
parameters=", ".join(self.parameters)
)
raise RuntimeError(
"full_signature may only be called for a 'function', but {name} is a '{kind}' node.".format(
name=self.name, kind=self.kind
)
) | [
"def",
"full_signature",
"(",
"self",
")",
":",
"if",
"self",
".",
"kind",
"==",
"\"function\"",
":",
"return",
"\"{template}{return_type} {name}({parameters})\"",
".",
"format",
"(",
"template",
"=",
"\"template <{0}> \"",
".",
"format",
"(",
"\", \"",
".",
"join... | The full signature of a ``"function"`` node.
**Return**
:class:`python:str`
The full signature of the function, including template, return type,
name, and parameter types.
**Raises**
:class:`python:RuntimeError`
If ``self.kind != "function"``. | [
"The",
"full",
"signature",
"of",
"a",
"function",
"node",
"."
] | fe7644829057af622e467bb529db6c03a830da99 | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L256-L280 | train | 212,657 |
svenevs/exhale | exhale/graph.py | ExhaleNode.findNestedNamespaces | def findNestedNamespaces(self, lst):
'''
Recursive helper function for finding nested namespaces. If this node is a
namespace node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedNamespaces`` with the same list.
:Parameters:
``lst`` (list)
The list each namespace node is to be appended to.
'''
if self.kind == "namespace":
lst.append(self)
for c in self.children:
c.findNestedNamespaces(lst) | python | def findNestedNamespaces(self, lst):
'''
Recursive helper function for finding nested namespaces. If this node is a
namespace node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedNamespaces`` with the same list.
:Parameters:
``lst`` (list)
The list each namespace node is to be appended to.
'''
if self.kind == "namespace":
lst.append(self)
for c in self.children:
c.findNestedNamespaces(lst) | [
"def",
"findNestedNamespaces",
"(",
"self",
",",
"lst",
")",
":",
"if",
"self",
".",
"kind",
"==",
"\"namespace\"",
":",
"lst",
".",
"append",
"(",
"self",
")",
"for",
"c",
"in",
"self",
".",
"children",
":",
"c",
".",
"findNestedNamespaces",
"(",
"lst... | Recursive helper function for finding nested namespaces. If this node is a
namespace node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedNamespaces`` with the same list.
:Parameters:
``lst`` (list)
The list each namespace node is to be appended to. | [
"Recursive",
"helper",
"function",
"for",
"finding",
"nested",
"namespaces",
".",
"If",
"this",
"node",
"is",
"a",
"namespace",
"node",
"it",
"is",
"appended",
"to",
"lst",
".",
"Each",
"node",
"also",
"calls",
"each",
"of",
"its",
"child",
"findNestedNamesp... | fe7644829057af622e467bb529db6c03a830da99 | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L409-L422 | train | 212,658 |
svenevs/exhale | exhale/graph.py | ExhaleNode.findNestedDirectories | def findNestedDirectories(self, lst):
'''
Recursive helper function for finding nested directories. If this node is a
directory node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedDirectories`` with the same list.
:Parameters:
``lst`` (list)
The list each directory node is to be appended to.
'''
if self.kind == "dir":
lst.append(self)
for c in self.children:
c.findNestedDirectories(lst) | python | def findNestedDirectories(self, lst):
'''
Recursive helper function for finding nested directories. If this node is a
directory node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedDirectories`` with the same list.
:Parameters:
``lst`` (list)
The list each directory node is to be appended to.
'''
if self.kind == "dir":
lst.append(self)
for c in self.children:
c.findNestedDirectories(lst) | [
"def",
"findNestedDirectories",
"(",
"self",
",",
"lst",
")",
":",
"if",
"self",
".",
"kind",
"==",
"\"dir\"",
":",
"lst",
".",
"append",
"(",
"self",
")",
"for",
"c",
"in",
"self",
".",
"children",
":",
"c",
".",
"findNestedDirectories",
"(",
"lst",
... | Recursive helper function for finding nested directories. If this node is a
directory node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedDirectories`` with the same list.
:Parameters:
``lst`` (list)
The list each directory node is to be appended to. | [
"Recursive",
"helper",
"function",
"for",
"finding",
"nested",
"directories",
".",
"If",
"this",
"node",
"is",
"a",
"directory",
"node",
"it",
"is",
"appended",
"to",
"lst",
".",
"Each",
"node",
"also",
"calls",
"each",
"of",
"its",
"child",
"findNestedDirec... | fe7644829057af622e467bb529db6c03a830da99 | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L424-L437 | train | 212,659 |
svenevs/exhale | exhale/graph.py | ExhaleNode.findNestedClassLike | def findNestedClassLike(self, lst):
'''
Recursive helper function for finding nested classes and structs. If this node
is a class or struct, it is appended to ``lst``. Each node also calls each of
its child ``findNestedClassLike`` with the same list.
:Parameters:
``lst`` (list)
The list each class or struct node is to be appended to.
'''
if self.kind == "class" or self.kind == "struct":
lst.append(self)
for c in self.children:
c.findNestedClassLike(lst) | python | def findNestedClassLike(self, lst):
'''
Recursive helper function for finding nested classes and structs. If this node
is a class or struct, it is appended to ``lst``. Each node also calls each of
its child ``findNestedClassLike`` with the same list.
:Parameters:
``lst`` (list)
The list each class or struct node is to be appended to.
'''
if self.kind == "class" or self.kind == "struct":
lst.append(self)
for c in self.children:
c.findNestedClassLike(lst) | [
"def",
"findNestedClassLike",
"(",
"self",
",",
"lst",
")",
":",
"if",
"self",
".",
"kind",
"==",
"\"class\"",
"or",
"self",
".",
"kind",
"==",
"\"struct\"",
":",
"lst",
".",
"append",
"(",
"self",
")",
"for",
"c",
"in",
"self",
".",
"children",
":",... | Recursive helper function for finding nested classes and structs. If this node
is a class or struct, it is appended to ``lst``. Each node also calls each of
its child ``findNestedClassLike`` with the same list.
:Parameters:
``lst`` (list)
The list each class or struct node is to be appended to. | [
"Recursive",
"helper",
"function",
"for",
"finding",
"nested",
"classes",
"and",
"structs",
".",
"If",
"this",
"node",
"is",
"a",
"class",
"or",
"struct",
"it",
"is",
"appended",
"to",
"lst",
".",
"Each",
"node",
"also",
"calls",
"each",
"of",
"its",
"ch... | fe7644829057af622e467bb529db6c03a830da99 | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L439-L452 | train | 212,660 |
svenevs/exhale | exhale/graph.py | ExhaleRoot.generateDirectoryNodeDocuments | def generateDirectoryNodeDocuments(self):
'''
Generates all of the directory reStructuredText documents.
'''
all_dirs = []
for d in self.dirs:
d.findNestedDirectories(all_dirs)
for d in all_dirs:
self.generateDirectoryNodeRST(d) | python | def generateDirectoryNodeDocuments(self):
'''
Generates all of the directory reStructuredText documents.
'''
all_dirs = []
for d in self.dirs:
d.findNestedDirectories(all_dirs)
for d in all_dirs:
self.generateDirectoryNodeRST(d) | [
"def",
"generateDirectoryNodeDocuments",
"(",
"self",
")",
":",
"all_dirs",
"=",
"[",
"]",
"for",
"d",
"in",
"self",
".",
"dirs",
":",
"d",
".",
"findNestedDirectories",
"(",
"all_dirs",
")",
"for",
"d",
"in",
"all_dirs",
":",
"self",
".",
"generateDirecto... | Generates all of the directory reStructuredText documents. | [
"Generates",
"all",
"of",
"the",
"directory",
"reStructuredText",
"documents",
"."
] | fe7644829057af622e467bb529db6c03a830da99 | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L3128-L3137 | train | 212,661 |
svenevs/exhale | exhale/graph.py | ExhaleRoot.gerrymanderNodeFilenames | def gerrymanderNodeFilenames(self):
'''
When creating nodes, the filename needs to be relative to ``conf.py``, so it
will include ``self.root_directory``. However, when generating the API, the
file we are writing to is in the same directory as the generated node files so
we need to remove the directory path from a given ExhaleNode's ``file_name``
before we can ``include`` it or use it in a ``toctree``.
'''
for node in self.all_nodes:
node.file_name = os.path.basename(node.file_name)
if node.kind == "file":
node.program_file = os.path.basename(node.program_file) | python | def gerrymanderNodeFilenames(self):
'''
When creating nodes, the filename needs to be relative to ``conf.py``, so it
will include ``self.root_directory``. However, when generating the API, the
file we are writing to is in the same directory as the generated node files so
we need to remove the directory path from a given ExhaleNode's ``file_name``
before we can ``include`` it or use it in a ``toctree``.
'''
for node in self.all_nodes:
node.file_name = os.path.basename(node.file_name)
if node.kind == "file":
node.program_file = os.path.basename(node.program_file) | [
"def",
"gerrymanderNodeFilenames",
"(",
"self",
")",
":",
"for",
"node",
"in",
"self",
".",
"all_nodes",
":",
"node",
".",
"file_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"node",
".",
"file_name",
")",
"if",
"node",
".",
"kind",
"==",
"\"fil... | When creating nodes, the filename needs to be relative to ``conf.py``, so it
will include ``self.root_directory``. However, when generating the API, the
file we are writing to is in the same directory as the generated node files so
we need to remove the directory path from a given ExhaleNode's ``file_name``
before we can ``include`` it or use it in a ``toctree``. | [
"When",
"creating",
"nodes",
"the",
"filename",
"needs",
"to",
"be",
"relative",
"to",
"conf",
".",
"py",
"so",
"it",
"will",
"include",
"self",
".",
"root_directory",
".",
"However",
"when",
"generating",
"the",
"API",
"the",
"file",
"we",
"are",
"writing... | fe7644829057af622e467bb529db6c03a830da99 | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L3375-L3386 | train | 212,662 |
svenevs/exhale | exhale/graph.py | ExhaleRoot.generateClassView | def generateClassView(self):
'''
Generates the class view hierarchy, writing it to ``self.class_hierarchy_file``.
'''
class_view_stream = StringIO()
for n in self.namespaces:
n.toHierarchy(True, 0, class_view_stream)
# Add everything that was not nested in a namespace.
missing = []
# class-like objects (structs and classes)
for cl in sorted(self.class_like):
if not cl.in_class_hierarchy:
missing.append(cl)
# enums
for e in sorted(self.enums):
if not e.in_class_hierarchy:
missing.append(e)
# unions
for u in sorted(self.unions):
if not u.in_class_hierarchy:
missing.append(u)
if len(missing) > 0:
idx = 0
last_missing_child = len(missing) - 1
for m in missing:
m.toHierarchy(True, 0, class_view_stream, idx == last_missing_child)
idx += 1
elif configs.createTreeView:
# need to restart since there were no missing children found, otherwise the
# last namespace will not correctly have a lastChild
class_view_stream.close()
class_view_stream = StringIO()
last_nspace_index = len(self.namespaces) - 1
for idx in range(last_nspace_index + 1):
nspace = self.namespaces[idx]
nspace.toHierarchy(True, 0, class_view_stream, idx == last_nspace_index)
# extract the value from the stream and close it down
class_view_string = class_view_stream.getvalue()
class_view_stream.close()
return class_view_string | python | def generateClassView(self):
'''
Generates the class view hierarchy, writing it to ``self.class_hierarchy_file``.
'''
class_view_stream = StringIO()
for n in self.namespaces:
n.toHierarchy(True, 0, class_view_stream)
# Add everything that was not nested in a namespace.
missing = []
# class-like objects (structs and classes)
for cl in sorted(self.class_like):
if not cl.in_class_hierarchy:
missing.append(cl)
# enums
for e in sorted(self.enums):
if not e.in_class_hierarchy:
missing.append(e)
# unions
for u in sorted(self.unions):
if not u.in_class_hierarchy:
missing.append(u)
if len(missing) > 0:
idx = 0
last_missing_child = len(missing) - 1
for m in missing:
m.toHierarchy(True, 0, class_view_stream, idx == last_missing_child)
idx += 1
elif configs.createTreeView:
# need to restart since there were no missing children found, otherwise the
# last namespace will not correctly have a lastChild
class_view_stream.close()
class_view_stream = StringIO()
last_nspace_index = len(self.namespaces) - 1
for idx in range(last_nspace_index + 1):
nspace = self.namespaces[idx]
nspace.toHierarchy(True, 0, class_view_stream, idx == last_nspace_index)
# extract the value from the stream and close it down
class_view_string = class_view_stream.getvalue()
class_view_stream.close()
return class_view_string | [
"def",
"generateClassView",
"(",
"self",
")",
":",
"class_view_stream",
"=",
"StringIO",
"(",
")",
"for",
"n",
"in",
"self",
".",
"namespaces",
":",
"n",
".",
"toHierarchy",
"(",
"True",
",",
"0",
",",
"class_view_stream",
")",
"# Add everything that was not n... | Generates the class view hierarchy, writing it to ``self.class_hierarchy_file``. | [
"Generates",
"the",
"class",
"view",
"hierarchy",
"writing",
"it",
"to",
"self",
".",
"class_hierarchy_file",
"."
] | fe7644829057af622e467bb529db6c03a830da99 | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L3516-L3560 | train | 212,663 |
svenevs/exhale | exhale/graph.py | ExhaleRoot.generateDirectoryView | def generateDirectoryView(self):
'''
Generates the file view hierarchy, writing it to ``self.file_hierarchy_file``.
'''
file_view_stream = StringIO()
for d in self.dirs:
d.toHierarchy(False, 0, file_view_stream)
# add potential missing files (not sure if this is possible though)
missing = []
for f in sorted(self.files):
if not f.in_file_hierarchy:
missing.append(f)
found_missing = len(missing) > 0
if found_missing:
idx = 0
last_missing_child = len(missing) - 1
for m in missing:
m.toHierarchy(False, 0, file_view_stream, idx == last_missing_child)
idx += 1
elif configs.createTreeView:
# need to restart since there were no missing children found, otherwise the
# last directory will not correctly have a lastChild
file_view_stream.close()
file_view_stream = StringIO()
last_dir_index = len(self.dirs) - 1
for idx in range(last_dir_index + 1):
curr_d = self.dirs[idx]
curr_d.toHierarchy(False, 0, file_view_stream, idx == last_dir_index)
# extract the value from the stream and close it down
file_view_string = file_view_stream.getvalue()
file_view_stream.close()
return file_view_string | python | def generateDirectoryView(self):
'''
Generates the file view hierarchy, writing it to ``self.file_hierarchy_file``.
'''
file_view_stream = StringIO()
for d in self.dirs:
d.toHierarchy(False, 0, file_view_stream)
# add potential missing files (not sure if this is possible though)
missing = []
for f in sorted(self.files):
if not f.in_file_hierarchy:
missing.append(f)
found_missing = len(missing) > 0
if found_missing:
idx = 0
last_missing_child = len(missing) - 1
for m in missing:
m.toHierarchy(False, 0, file_view_stream, idx == last_missing_child)
idx += 1
elif configs.createTreeView:
# need to restart since there were no missing children found, otherwise the
# last directory will not correctly have a lastChild
file_view_stream.close()
file_view_stream = StringIO()
last_dir_index = len(self.dirs) - 1
for idx in range(last_dir_index + 1):
curr_d = self.dirs[idx]
curr_d.toHierarchy(False, 0, file_view_stream, idx == last_dir_index)
# extract the value from the stream and close it down
file_view_string = file_view_stream.getvalue()
file_view_stream.close()
return file_view_string | [
"def",
"generateDirectoryView",
"(",
"self",
")",
":",
"file_view_stream",
"=",
"StringIO",
"(",
")",
"for",
"d",
"in",
"self",
".",
"dirs",
":",
"d",
".",
"toHierarchy",
"(",
"False",
",",
"0",
",",
"file_view_stream",
")",
"# add potential missing files (not... | Generates the file view hierarchy, writing it to ``self.file_hierarchy_file``. | [
"Generates",
"the",
"file",
"view",
"hierarchy",
"writing",
"it",
"to",
"self",
".",
"file_hierarchy_file",
"."
] | fe7644829057af622e467bb529db6c03a830da99 | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L3562-L3598 | train | 212,664 |
svenevs/exhale | exhale/graph.py | ExhaleRoot.toConsole | def toConsole(self):
'''
Convenience function for printing out the entire API being generated to the
console. Unused in the release, but is helpful for debugging ;)
'''
fmt_spec = {
"class": utils.AnsiColors.BOLD_MAGENTA,
"struct": utils.AnsiColors.BOLD_CYAN,
"define": utils.AnsiColors.BOLD_YELLOW,
"enum": utils.AnsiColors.BOLD_MAGENTA,
"enumvalue": utils.AnsiColors.BOLD_RED, # red means unused in framework
"function": utils.AnsiColors.BOLD_CYAN,
"file": utils.AnsiColors.BOLD_YELLOW,
"dir": utils.AnsiColors.BOLD_MAGENTA,
"group": utils.AnsiColors.BOLD_RED, # red means unused in framework
"namespace": utils.AnsiColors.BOLD_CYAN,
"typedef": utils.AnsiColors.BOLD_YELLOW,
"union": utils.AnsiColors.BOLD_MAGENTA,
"variable": utils.AnsiColors.BOLD_CYAN
}
self.consoleFormat(
"{0} and {1}".format(
utils._use_color("Classes", fmt_spec["class"], sys.stderr),
utils._use_color("Structs", fmt_spec["struct"], sys.stderr),
),
self.class_like,
fmt_spec
)
self.consoleFormat(
utils._use_color("Defines", fmt_spec["define"], sys.stderr),
self.defines,
fmt_spec
)
self.consoleFormat(
utils._use_color("Enums", fmt_spec["enum"], sys.stderr),
self.enums,
fmt_spec
)
self.consoleFormat(
utils._use_color("Enum Values (unused)", fmt_spec["enumvalue"], sys.stderr),
self.enum_values,
fmt_spec
)
self.consoleFormat(
utils._use_color("Functions", fmt_spec["function"], sys.stderr),
self.functions,
fmt_spec
)
self.consoleFormat(
utils._use_color("Files", fmt_spec["file"], sys.stderr),
self.files,
fmt_spec
)
self.consoleFormat(
utils._use_color("Directories", fmt_spec["dir"], sys.stderr),
self.dirs,
fmt_spec
)
self.consoleFormat(
utils._use_color("Groups (unused)", fmt_spec["group"], sys.stderr),
self.groups,
fmt_spec
)
self.consoleFormat(
utils._use_color("Namespaces", fmt_spec["namespace"], sys.stderr),
self.namespaces,
fmt_spec
)
self.consoleFormat(
utils._use_color("Typedefs", fmt_spec["typedef"], sys.stderr),
self.typedefs,
fmt_spec
)
self.consoleFormat(
utils._use_color("Unions", fmt_spec["union"], sys.stderr),
self.unions,
fmt_spec
)
self.consoleFormat(
utils._use_color("Variables", fmt_spec["variable"], sys.stderr),
self.variables,
fmt_spec
) | python | def toConsole(self):
'''
Convenience function for printing out the entire API being generated to the
console. Unused in the release, but is helpful for debugging ;)
'''
fmt_spec = {
"class": utils.AnsiColors.BOLD_MAGENTA,
"struct": utils.AnsiColors.BOLD_CYAN,
"define": utils.AnsiColors.BOLD_YELLOW,
"enum": utils.AnsiColors.BOLD_MAGENTA,
"enumvalue": utils.AnsiColors.BOLD_RED, # red means unused in framework
"function": utils.AnsiColors.BOLD_CYAN,
"file": utils.AnsiColors.BOLD_YELLOW,
"dir": utils.AnsiColors.BOLD_MAGENTA,
"group": utils.AnsiColors.BOLD_RED, # red means unused in framework
"namespace": utils.AnsiColors.BOLD_CYAN,
"typedef": utils.AnsiColors.BOLD_YELLOW,
"union": utils.AnsiColors.BOLD_MAGENTA,
"variable": utils.AnsiColors.BOLD_CYAN
}
self.consoleFormat(
"{0} and {1}".format(
utils._use_color("Classes", fmt_spec["class"], sys.stderr),
utils._use_color("Structs", fmt_spec["struct"], sys.stderr),
),
self.class_like,
fmt_spec
)
self.consoleFormat(
utils._use_color("Defines", fmt_spec["define"], sys.stderr),
self.defines,
fmt_spec
)
self.consoleFormat(
utils._use_color("Enums", fmt_spec["enum"], sys.stderr),
self.enums,
fmt_spec
)
self.consoleFormat(
utils._use_color("Enum Values (unused)", fmt_spec["enumvalue"], sys.stderr),
self.enum_values,
fmt_spec
)
self.consoleFormat(
utils._use_color("Functions", fmt_spec["function"], sys.stderr),
self.functions,
fmt_spec
)
self.consoleFormat(
utils._use_color("Files", fmt_spec["file"], sys.stderr),
self.files,
fmt_spec
)
self.consoleFormat(
utils._use_color("Directories", fmt_spec["dir"], sys.stderr),
self.dirs,
fmt_spec
)
self.consoleFormat(
utils._use_color("Groups (unused)", fmt_spec["group"], sys.stderr),
self.groups,
fmt_spec
)
self.consoleFormat(
utils._use_color("Namespaces", fmt_spec["namespace"], sys.stderr),
self.namespaces,
fmt_spec
)
self.consoleFormat(
utils._use_color("Typedefs", fmt_spec["typedef"], sys.stderr),
self.typedefs,
fmt_spec
)
self.consoleFormat(
utils._use_color("Unions", fmt_spec["union"], sys.stderr),
self.unions,
fmt_spec
)
self.consoleFormat(
utils._use_color("Variables", fmt_spec["variable"], sys.stderr),
self.variables,
fmt_spec
) | [
"def",
"toConsole",
"(",
"self",
")",
":",
"fmt_spec",
"=",
"{",
"\"class\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_MAGENTA",
",",
"\"struct\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_CYAN",
",",
"\"define\"",
":",
"utils",
".",
"AnsiColors",
".... | Convenience function for printing out the entire API being generated to the
console. Unused in the release, but is helpful for debugging ;) | [
"Convenience",
"function",
"for",
"printing",
"out",
"the",
"entire",
"API",
"being",
"generated",
"to",
"the",
"console",
".",
"Unused",
"in",
"the",
"release",
"but",
"is",
"helpful",
"for",
"debugging",
";",
")"
] | fe7644829057af622e467bb529db6c03a830da99 | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L3747-L3830 | train | 212,665 |
svenevs/exhale | exhale/utils.py | sanitize | def sanitize(name):
"""
Sanitize the specified ``name`` for use with breathe directives.
**Parameters**
``name`` (:class:`python:str`)
The name to be sanitized.
**Return**
:class:`python:str`
The input ``name`` sanitized to use with breathe directives (primarily for use
with ``.. doxygenfunction::``). Replacements such as ``"<" -> "<"`` are
performed, as well as removing spaces ``"< " -> "<"`` must be done. Breathe is
particularly sensitive with respect to whitespace.
"""
return name.replace(
"<", "<"
).replace(
">", ">"
).replace(
"&", "&"
).replace(
"< ", "<"
).replace(
" >", ">"
).replace(
" &", "&"
).replace(
"& ", "&"
) | python | def sanitize(name):
"""
Sanitize the specified ``name`` for use with breathe directives.
**Parameters**
``name`` (:class:`python:str`)
The name to be sanitized.
**Return**
:class:`python:str`
The input ``name`` sanitized to use with breathe directives (primarily for use
with ``.. doxygenfunction::``). Replacements such as ``"<" -> "<"`` are
performed, as well as removing spaces ``"< " -> "<"`` must be done. Breathe is
particularly sensitive with respect to whitespace.
"""
return name.replace(
"<", "<"
).replace(
">", ">"
).replace(
"&", "&"
).replace(
"< ", "<"
).replace(
" >", ">"
).replace(
" &", "&"
).replace(
"& ", "&"
) | [
"def",
"sanitize",
"(",
"name",
")",
":",
"return",
"name",
".",
"replace",
"(",
"\"<\"",
",",
"\"<\"",
")",
".",
"replace",
"(",
"\">\"",
",",
"\">\"",
")",
".",
"replace",
"(",
"\"&\"",
",",
"\"&\"",
")",
".",
"replace",
"(",
"\"< \"",
",... | Sanitize the specified ``name`` for use with breathe directives.
**Parameters**
``name`` (:class:`python:str`)
The name to be sanitized.
**Return**
:class:`python:str`
The input ``name`` sanitized to use with breathe directives (primarily for use
with ``.. doxygenfunction::``). Replacements such as ``"<" -> "<"`` are
performed, as well as removing spaces ``"< " -> "<"`` must be done. Breathe is
particularly sensitive with respect to whitespace. | [
"Sanitize",
"the",
"specified",
"name",
"for",
"use",
"with",
"breathe",
"directives",
"."
] | fe7644829057af622e467bb529db6c03a830da99 | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/utils.py#L255-L286 | train | 212,666 |
svenevs/exhale | exhale/utils.py | doxygenLanguageToPygmentsLexer | def doxygenLanguageToPygmentsLexer(location, language):
'''
Given an input location and language specification, acquire the Pygments lexer to
use for this file.
1. If :data:`configs.lexerMapping <exhale.configs.lexerMapping>` has been specified,
then :data:`configs._compiled_lexer_mapping <exhale.configs._compiled_lexer_mapping>`
will be queried first using the ``location`` parameter.
2. If no matching was found, then the appropriate lexer defined in
:data:`LANG_TO_LEX <exhale.utils.LANG_TO_LEX>` is used.
3. If no matching language is found, ``"none"`` is returned (indicating to Pygments
that no syntax highlighting should occur).
'''
if configs._compiled_lexer_mapping:
for regex in configs._compiled_lexer_mapping:
if regex.match(location):
return configs._compiled_lexer_mapping[regex]
if language in LANG_TO_LEX:
return LANG_TO_LEX[language]
return "none" | python | def doxygenLanguageToPygmentsLexer(location, language):
'''
Given an input location and language specification, acquire the Pygments lexer to
use for this file.
1. If :data:`configs.lexerMapping <exhale.configs.lexerMapping>` has been specified,
then :data:`configs._compiled_lexer_mapping <exhale.configs._compiled_lexer_mapping>`
will be queried first using the ``location`` parameter.
2. If no matching was found, then the appropriate lexer defined in
:data:`LANG_TO_LEX <exhale.utils.LANG_TO_LEX>` is used.
3. If no matching language is found, ``"none"`` is returned (indicating to Pygments
that no syntax highlighting should occur).
'''
if configs._compiled_lexer_mapping:
for regex in configs._compiled_lexer_mapping:
if regex.match(location):
return configs._compiled_lexer_mapping[regex]
if language in LANG_TO_LEX:
return LANG_TO_LEX[language]
return "none" | [
"def",
"doxygenLanguageToPygmentsLexer",
"(",
"location",
",",
"language",
")",
":",
"if",
"configs",
".",
"_compiled_lexer_mapping",
":",
"for",
"regex",
"in",
"configs",
".",
"_compiled_lexer_mapping",
":",
"if",
"regex",
".",
"match",
"(",
"location",
")",
":... | Given an input location and language specification, acquire the Pygments lexer to
use for this file.
1. If :data:`configs.lexerMapping <exhale.configs.lexerMapping>` has been specified,
then :data:`configs._compiled_lexer_mapping <exhale.configs._compiled_lexer_mapping>`
will be queried first using the ``location`` parameter.
2. If no matching was found, then the appropriate lexer defined in
:data:`LANG_TO_LEX <exhale.utils.LANG_TO_LEX>` is used.
3. If no matching language is found, ``"none"`` is returned (indicating to Pygments
that no syntax highlighting should occur). | [
"Given",
"an",
"input",
"location",
"and",
"language",
"specification",
"acquire",
"the",
"Pygments",
"lexer",
"to",
"use",
"for",
"this",
"file",
"."
] | fe7644829057af622e467bb529db6c03a830da99 | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/utils.py#L352-L373 | train | 212,667 |
svenevs/exhale | exhale/parse.py | getBriefAndDetailedRST | def getBriefAndDetailedRST(textRoot, node):
'''
Given an input ``node``, return a tuple of strings where the first element of
the return is the ``brief`` description and the second is the ``detailed``
description.
.. todo:: actually document this
'''
node_xml_contents = utils.nodeCompoundXMLContents(node)
if not node_xml_contents:
return "", ""
try:
node_soup = BeautifulSoup(node_xml_contents, "lxml-xml")
except:
utils.fancyError("Unable to parse [{0}] xml using BeautifulSoup".format(node.name))
try:
# In the file xml definitions, things such as enums or defines are listed inside
# of <sectiondef> tags, which may have some nested <briefdescription> or
# <detaileddescription> tags. So as long as we make sure not to search
# recursively, then the following will extract the file descriptions only
# process the brief description if provided
brief = node_soup.doxygen.compounddef.find_all("briefdescription", recursive=False)
brief_desc = ""
if len(brief) == 1:
brief = brief[0]
# Empty descriptions will usually get parsed as a single newline, which we
# want to ignore ;)
if not brief.get_text().isspace():
brief_desc = convertDescriptionToRST(textRoot, node, brief, None)
# process the detailed description if provided
detailed = node_soup.doxygen.compounddef.find_all("detaileddescription", recursive=False)
detailed_desc = ""
if len(detailed) == 1:
detailed = detailed[0]
if not detailed.get_text().isspace():
detailed_desc = convertDescriptionToRST(textRoot, node, detailed, "Detailed Description")
return brief_desc, detailed_desc
except:
utils.fancyError(
"Could not acquire soup.doxygen.compounddef; likely not a doxygen xml file."
) | python | def getBriefAndDetailedRST(textRoot, node):
'''
Given an input ``node``, return a tuple of strings where the first element of
the return is the ``brief`` description and the second is the ``detailed``
description.
.. todo:: actually document this
'''
node_xml_contents = utils.nodeCompoundXMLContents(node)
if not node_xml_contents:
return "", ""
try:
node_soup = BeautifulSoup(node_xml_contents, "lxml-xml")
except:
utils.fancyError("Unable to parse [{0}] xml using BeautifulSoup".format(node.name))
try:
# In the file xml definitions, things such as enums or defines are listed inside
# of <sectiondef> tags, which may have some nested <briefdescription> or
# <detaileddescription> tags. So as long as we make sure not to search
# recursively, then the following will extract the file descriptions only
# process the brief description if provided
brief = node_soup.doxygen.compounddef.find_all("briefdescription", recursive=False)
brief_desc = ""
if len(brief) == 1:
brief = brief[0]
# Empty descriptions will usually get parsed as a single newline, which we
# want to ignore ;)
if not brief.get_text().isspace():
brief_desc = convertDescriptionToRST(textRoot, node, brief, None)
# process the detailed description if provided
detailed = node_soup.doxygen.compounddef.find_all("detaileddescription", recursive=False)
detailed_desc = ""
if len(detailed) == 1:
detailed = detailed[0]
if not detailed.get_text().isspace():
detailed_desc = convertDescriptionToRST(textRoot, node, detailed, "Detailed Description")
return brief_desc, detailed_desc
except:
utils.fancyError(
"Could not acquire soup.doxygen.compounddef; likely not a doxygen xml file."
) | [
"def",
"getBriefAndDetailedRST",
"(",
"textRoot",
",",
"node",
")",
":",
"node_xml_contents",
"=",
"utils",
".",
"nodeCompoundXMLContents",
"(",
"node",
")",
"if",
"not",
"node_xml_contents",
":",
"return",
"\"\"",
",",
"\"\"",
"try",
":",
"node_soup",
"=",
"B... | Given an input ``node``, return a tuple of strings where the first element of
the return is the ``brief`` description and the second is the ``detailed``
description.
.. todo:: actually document this | [
"Given",
"an",
"input",
"node",
"return",
"a",
"tuple",
"of",
"strings",
"where",
"the",
"first",
"element",
"of",
"the",
"return",
"is",
"the",
"brief",
"description",
"and",
"the",
"second",
"is",
"the",
"detailed",
"description",
"."
] | fe7644829057af622e467bb529db6c03a830da99 | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/parse.py#L201-L245 | train | 212,668 |
djm/python-scrapyd-api | scrapyd_api/wrapper.py | ScrapydAPI._build_url | def _build_url(self, endpoint):
"""
Builds the absolute URL using the target and desired endpoint.
"""
try:
path = self.endpoints[endpoint]
except KeyError:
msg = 'Unknown endpoint `{0}`'
raise ValueError(msg.format(endpoint))
absolute_url = urljoin(self.target, path)
return absolute_url | python | def _build_url(self, endpoint):
"""
Builds the absolute URL using the target and desired endpoint.
"""
try:
path = self.endpoints[endpoint]
except KeyError:
msg = 'Unknown endpoint `{0}`'
raise ValueError(msg.format(endpoint))
absolute_url = urljoin(self.target, path)
return absolute_url | [
"def",
"_build_url",
"(",
"self",
",",
"endpoint",
")",
":",
"try",
":",
"path",
"=",
"self",
".",
"endpoints",
"[",
"endpoint",
"]",
"except",
"KeyError",
":",
"msg",
"=",
"'Unknown endpoint `{0}`'",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
... | Builds the absolute URL using the target and desired endpoint. | [
"Builds",
"the",
"absolute",
"URL",
"using",
"the",
"target",
"and",
"desired",
"endpoint",
"."
] | 42f287cf83c3a5bd46795f4f85cce02a56829921 | https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/wrapper.py#L51-L61 | train | 212,669 |
djm/python-scrapyd-api | scrapyd_api/wrapper.py | ScrapydAPI.add_version | def add_version(self, project, version, egg):
"""
Adds a new project egg to the Scrapyd service. First class, maps to
Scrapyd's add version endpoint.
"""
url = self._build_url(constants.ADD_VERSION_ENDPOINT)
data = {
'project': project,
'version': version
}
files = {
'egg': egg
}
json = self.client.post(url, data=data, files=files,
timeout=self.timeout)
return json['spiders'] | python | def add_version(self, project, version, egg):
"""
Adds a new project egg to the Scrapyd service. First class, maps to
Scrapyd's add version endpoint.
"""
url = self._build_url(constants.ADD_VERSION_ENDPOINT)
data = {
'project': project,
'version': version
}
files = {
'egg': egg
}
json = self.client.post(url, data=data, files=files,
timeout=self.timeout)
return json['spiders'] | [
"def",
"add_version",
"(",
"self",
",",
"project",
",",
"version",
",",
"egg",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"constants",
".",
"ADD_VERSION_ENDPOINT",
")",
"data",
"=",
"{",
"'project'",
":",
"project",
",",
"'version'",
":",
"ver... | Adds a new project egg to the Scrapyd service. First class, maps to
Scrapyd's add version endpoint. | [
"Adds",
"a",
"new",
"project",
"egg",
"to",
"the",
"Scrapyd",
"service",
".",
"First",
"class",
"maps",
"to",
"Scrapyd",
"s",
"add",
"version",
"endpoint",
"."
] | 42f287cf83c3a5bd46795f4f85cce02a56829921 | https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/wrapper.py#L63-L78 | train | 212,670 |
djm/python-scrapyd-api | scrapyd_api/wrapper.py | ScrapydAPI.cancel | def cancel(self, project, job, signal=None):
"""
Cancels a job from a specific project. First class, maps to
Scrapyd's cancel job endpoint.
"""
url = self._build_url(constants.CANCEL_ENDPOINT)
data = {
'project': project,
'job': job,
}
if signal is not None:
data['signal'] = signal
json = self.client.post(url, data=data, timeout=self.timeout)
return json['prevstate'] | python | def cancel(self, project, job, signal=None):
"""
Cancels a job from a specific project. First class, maps to
Scrapyd's cancel job endpoint.
"""
url = self._build_url(constants.CANCEL_ENDPOINT)
data = {
'project': project,
'job': job,
}
if signal is not None:
data['signal'] = signal
json = self.client.post(url, data=data, timeout=self.timeout)
return json['prevstate'] | [
"def",
"cancel",
"(",
"self",
",",
"project",
",",
"job",
",",
"signal",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"constants",
".",
"CANCEL_ENDPOINT",
")",
"data",
"=",
"{",
"'project'",
":",
"project",
",",
"'job'",
":",
"j... | Cancels a job from a specific project. First class, maps to
Scrapyd's cancel job endpoint. | [
"Cancels",
"a",
"job",
"from",
"a",
"specific",
"project",
".",
"First",
"class",
"maps",
"to",
"Scrapyd",
"s",
"cancel",
"job",
"endpoint",
"."
] | 42f287cf83c3a5bd46795f4f85cce02a56829921 | https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/wrapper.py#L80-L93 | train | 212,671 |
djm/python-scrapyd-api | scrapyd_api/wrapper.py | ScrapydAPI.delete_project | def delete_project(self, project):
"""
Deletes all versions of a project. First class, maps to Scrapyd's
delete project endpoint.
"""
url = self._build_url(constants.DELETE_PROJECT_ENDPOINT)
data = {
'project': project,
}
self.client.post(url, data=data, timeout=self.timeout)
return True | python | def delete_project(self, project):
"""
Deletes all versions of a project. First class, maps to Scrapyd's
delete project endpoint.
"""
url = self._build_url(constants.DELETE_PROJECT_ENDPOINT)
data = {
'project': project,
}
self.client.post(url, data=data, timeout=self.timeout)
return True | [
"def",
"delete_project",
"(",
"self",
",",
"project",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"constants",
".",
"DELETE_PROJECT_ENDPOINT",
")",
"data",
"=",
"{",
"'project'",
":",
"project",
",",
"}",
"self",
".",
"client",
".",
"post",
"("... | Deletes all versions of a project. First class, maps to Scrapyd's
delete project endpoint. | [
"Deletes",
"all",
"versions",
"of",
"a",
"project",
".",
"First",
"class",
"maps",
"to",
"Scrapyd",
"s",
"delete",
"project",
"endpoint",
"."
] | 42f287cf83c3a5bd46795f4f85cce02a56829921 | https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/wrapper.py#L95-L105 | train | 212,672 |
djm/python-scrapyd-api | scrapyd_api/wrapper.py | ScrapydAPI.delete_version | def delete_version(self, project, version):
"""
Deletes a specific version of a project. First class, maps to
Scrapyd's delete version endpoint.
"""
url = self._build_url(constants.DELETE_VERSION_ENDPOINT)
data = {
'project': project,
'version': version
}
self.client.post(url, data=data, timeout=self.timeout)
return True | python | def delete_version(self, project, version):
"""
Deletes a specific version of a project. First class, maps to
Scrapyd's delete version endpoint.
"""
url = self._build_url(constants.DELETE_VERSION_ENDPOINT)
data = {
'project': project,
'version': version
}
self.client.post(url, data=data, timeout=self.timeout)
return True | [
"def",
"delete_version",
"(",
"self",
",",
"project",
",",
"version",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"constants",
".",
"DELETE_VERSION_ENDPOINT",
")",
"data",
"=",
"{",
"'project'",
":",
"project",
",",
"'version'",
":",
"version",
"... | Deletes a specific version of a project. First class, maps to
Scrapyd's delete version endpoint. | [
"Deletes",
"a",
"specific",
"version",
"of",
"a",
"project",
".",
"First",
"class",
"maps",
"to",
"Scrapyd",
"s",
"delete",
"version",
"endpoint",
"."
] | 42f287cf83c3a5bd46795f4f85cce02a56829921 | https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/wrapper.py#L107-L118 | train | 212,673 |
djm/python-scrapyd-api | scrapyd_api/wrapper.py | ScrapydAPI.job_status | def job_status(self, project, job_id):
"""
Retrieves the 'status' of a specific job specified by its id. Derived,
utilises Scrapyd's list jobs endpoint to provide the answer.
"""
all_jobs = self.list_jobs(project)
for state in constants.JOB_STATES:
job_ids = [job['id'] for job in all_jobs[state]]
if job_id in job_ids:
return state
return '' | python | def job_status(self, project, job_id):
"""
Retrieves the 'status' of a specific job specified by its id. Derived,
utilises Scrapyd's list jobs endpoint to provide the answer.
"""
all_jobs = self.list_jobs(project)
for state in constants.JOB_STATES:
job_ids = [job['id'] for job in all_jobs[state]]
if job_id in job_ids:
return state
return '' | [
"def",
"job_status",
"(",
"self",
",",
"project",
",",
"job_id",
")",
":",
"all_jobs",
"=",
"self",
".",
"list_jobs",
"(",
"project",
")",
"for",
"state",
"in",
"constants",
".",
"JOB_STATES",
":",
"job_ids",
"=",
"[",
"job",
"[",
"'id'",
"]",
"for",
... | Retrieves the 'status' of a specific job specified by its id. Derived,
utilises Scrapyd's list jobs endpoint to provide the answer. | [
"Retrieves",
"the",
"status",
"of",
"a",
"specific",
"job",
"specified",
"by",
"its",
"id",
".",
"Derived",
"utilises",
"Scrapyd",
"s",
"list",
"jobs",
"endpoint",
"to",
"provide",
"the",
"answer",
"."
] | 42f287cf83c3a5bd46795f4f85cce02a56829921 | https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/wrapper.py#L120-L130 | train | 212,674 |
djm/python-scrapyd-api | scrapyd_api/wrapper.py | ScrapydAPI.list_jobs | def list_jobs(self, project):
"""
Lists all known jobs for a project. First class, maps to Scrapyd's
list jobs endpoint.
"""
url = self._build_url(constants.LIST_JOBS_ENDPOINT)
params = {'project': project}
jobs = self.client.get(url, params=params, timeout=self.timeout)
return jobs | python | def list_jobs(self, project):
"""
Lists all known jobs for a project. First class, maps to Scrapyd's
list jobs endpoint.
"""
url = self._build_url(constants.LIST_JOBS_ENDPOINT)
params = {'project': project}
jobs = self.client.get(url, params=params, timeout=self.timeout)
return jobs | [
"def",
"list_jobs",
"(",
"self",
",",
"project",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"constants",
".",
"LIST_JOBS_ENDPOINT",
")",
"params",
"=",
"{",
"'project'",
":",
"project",
"}",
"jobs",
"=",
"self",
".",
"client",
".",
"get",
"(... | Lists all known jobs for a project. First class, maps to Scrapyd's
list jobs endpoint. | [
"Lists",
"all",
"known",
"jobs",
"for",
"a",
"project",
".",
"First",
"class",
"maps",
"to",
"Scrapyd",
"s",
"list",
"jobs",
"endpoint",
"."
] | 42f287cf83c3a5bd46795f4f85cce02a56829921 | https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/wrapper.py#L132-L140 | train | 212,675 |
djm/python-scrapyd-api | scrapyd_api/wrapper.py | ScrapydAPI.list_projects | def list_projects(self):
"""
Lists all deployed projects. First class, maps to Scrapyd's
list projects endpoint.
"""
url = self._build_url(constants.LIST_PROJECTS_ENDPOINT)
json = self.client.get(url, timeout=self.timeout)
return json['projects'] | python | def list_projects(self):
"""
Lists all deployed projects. First class, maps to Scrapyd's
list projects endpoint.
"""
url = self._build_url(constants.LIST_PROJECTS_ENDPOINT)
json = self.client.get(url, timeout=self.timeout)
return json['projects'] | [
"def",
"list_projects",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"constants",
".",
"LIST_PROJECTS_ENDPOINT",
")",
"json",
"=",
"self",
".",
"client",
".",
"get",
"(",
"url",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"r... | Lists all deployed projects. First class, maps to Scrapyd's
list projects endpoint. | [
"Lists",
"all",
"deployed",
"projects",
".",
"First",
"class",
"maps",
"to",
"Scrapyd",
"s",
"list",
"projects",
"endpoint",
"."
] | 42f287cf83c3a5bd46795f4f85cce02a56829921 | https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/wrapper.py#L142-L149 | train | 212,676 |
djm/python-scrapyd-api | scrapyd_api/wrapper.py | ScrapydAPI.list_spiders | def list_spiders(self, project):
"""
Lists all known spiders for a specific project. First class, maps
to Scrapyd's list spiders endpoint.
"""
url = self._build_url(constants.LIST_SPIDERS_ENDPOINT)
params = {'project': project}
json = self.client.get(url, params=params, timeout=self.timeout)
return json['spiders'] | python | def list_spiders(self, project):
"""
Lists all known spiders for a specific project. First class, maps
to Scrapyd's list spiders endpoint.
"""
url = self._build_url(constants.LIST_SPIDERS_ENDPOINT)
params = {'project': project}
json = self.client.get(url, params=params, timeout=self.timeout)
return json['spiders'] | [
"def",
"list_spiders",
"(",
"self",
",",
"project",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"constants",
".",
"LIST_SPIDERS_ENDPOINT",
")",
"params",
"=",
"{",
"'project'",
":",
"project",
"}",
"json",
"=",
"self",
".",
"client",
".",
"get"... | Lists all known spiders for a specific project. First class, maps
to Scrapyd's list spiders endpoint. | [
"Lists",
"all",
"known",
"spiders",
"for",
"a",
"specific",
"project",
".",
"First",
"class",
"maps",
"to",
"Scrapyd",
"s",
"list",
"spiders",
"endpoint",
"."
] | 42f287cf83c3a5bd46795f4f85cce02a56829921 | https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/wrapper.py#L151-L159 | train | 212,677 |
djm/python-scrapyd-api | scrapyd_api/wrapper.py | ScrapydAPI.list_versions | def list_versions(self, project):
"""
Lists all deployed versions of a specific project. First class, maps
to Scrapyd's list versions endpoint.
"""
url = self._build_url(constants.LIST_VERSIONS_ENDPOINT)
params = {'project': project}
json = self.client.get(url, params=params, timeout=self.timeout)
return json['versions'] | python | def list_versions(self, project):
"""
Lists all deployed versions of a specific project. First class, maps
to Scrapyd's list versions endpoint.
"""
url = self._build_url(constants.LIST_VERSIONS_ENDPOINT)
params = {'project': project}
json = self.client.get(url, params=params, timeout=self.timeout)
return json['versions'] | [
"def",
"list_versions",
"(",
"self",
",",
"project",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"constants",
".",
"LIST_VERSIONS_ENDPOINT",
")",
"params",
"=",
"{",
"'project'",
":",
"project",
"}",
"json",
"=",
"self",
".",
"client",
".",
"ge... | Lists all deployed versions of a specific project. First class, maps
to Scrapyd's list versions endpoint. | [
"Lists",
"all",
"deployed",
"versions",
"of",
"a",
"specific",
"project",
".",
"First",
"class",
"maps",
"to",
"Scrapyd",
"s",
"list",
"versions",
"endpoint",
"."
] | 42f287cf83c3a5bd46795f4f85cce02a56829921 | https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/wrapper.py#L161-L169 | train | 212,678 |
djm/python-scrapyd-api | scrapyd_api/wrapper.py | ScrapydAPI.schedule | def schedule(self, project, spider, settings=None, **kwargs):
"""
Schedules a spider from a specific project to run. First class, maps
to Scrapyd's scheduling endpoint.
"""
url = self._build_url(constants.SCHEDULE_ENDPOINT)
data = {
'project': project,
'spider': spider
}
data.update(kwargs)
if settings:
setting_params = []
for setting_name, value in iteritems(settings):
setting_params.append('{0}={1}'.format(setting_name, value))
data['setting'] = setting_params
json = self.client.post(url, data=data, timeout=self.timeout)
return json['jobid'] | python | def schedule(self, project, spider, settings=None, **kwargs):
"""
Schedules a spider from a specific project to run. First class, maps
to Scrapyd's scheduling endpoint.
"""
url = self._build_url(constants.SCHEDULE_ENDPOINT)
data = {
'project': project,
'spider': spider
}
data.update(kwargs)
if settings:
setting_params = []
for setting_name, value in iteritems(settings):
setting_params.append('{0}={1}'.format(setting_name, value))
data['setting'] = setting_params
json = self.client.post(url, data=data, timeout=self.timeout)
return json['jobid'] | [
"def",
"schedule",
"(",
"self",
",",
"project",
",",
"spider",
",",
"settings",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"constants",
".",
"SCHEDULE_ENDPOINT",
")",
"data",
"=",
"{",
"'project'",
":",
... | Schedules a spider from a specific project to run. First class, maps
to Scrapyd's scheduling endpoint. | [
"Schedules",
"a",
"spider",
"from",
"a",
"specific",
"project",
"to",
"run",
".",
"First",
"class",
"maps",
"to",
"Scrapyd",
"s",
"scheduling",
"endpoint",
"."
] | 42f287cf83c3a5bd46795f4f85cce02a56829921 | https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/wrapper.py#L171-L189 | train | 212,679 |
djm/python-scrapyd-api | scrapyd_api/client.py | Client._handle_response | def _handle_response(self, response):
"""
Handles the response received from Scrapyd.
"""
if not response.ok:
raise ScrapydResponseError(
"Scrapyd returned a {0} error: {1}".format(
response.status_code,
response.text))
try:
json = response.json()
except ValueError:
raise ScrapydResponseError("Scrapyd returned an invalid JSON "
"response: {0}".format(response.text))
if json['status'] == 'ok':
json.pop('status')
return json
elif json['status'] == 'error':
raise ScrapydResponseError(json['message']) | python | def _handle_response(self, response):
"""
Handles the response received from Scrapyd.
"""
if not response.ok:
raise ScrapydResponseError(
"Scrapyd returned a {0} error: {1}".format(
response.status_code,
response.text))
try:
json = response.json()
except ValueError:
raise ScrapydResponseError("Scrapyd returned an invalid JSON "
"response: {0}".format(response.text))
if json['status'] == 'ok':
json.pop('status')
return json
elif json['status'] == 'error':
raise ScrapydResponseError(json['message']) | [
"def",
"_handle_response",
"(",
"self",
",",
"response",
")",
":",
"if",
"not",
"response",
".",
"ok",
":",
"raise",
"ScrapydResponseError",
"(",
"\"Scrapyd returned a {0} error: {1}\"",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"... | Handles the response received from Scrapyd. | [
"Handles",
"the",
"response",
"received",
"from",
"Scrapyd",
"."
] | 42f287cf83c3a5bd46795f4f85cce02a56829921 | https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/client.py#L15-L34 | train | 212,680 |
alvinwan/TexSoup | TexSoup/data.py | TexNode.all | def all(self):
r"""Returns all content in this node, regardless of whitespace or
not. This includes all LaTeX needed to reconstruct the original source.
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \newcommand{reverseconcat}[3]{#3#2#1}
... ''')
>>> list(soup.all)
['\n', \newcommand{reverseconcat}[3]{#3#2#1}, '\n']
"""
for child in self.expr.all:
if isinstance(child, TexExpr):
node = TexNode(child)
node.parent = self
yield node
else:
yield child | python | def all(self):
r"""Returns all content in this node, regardless of whitespace or
not. This includes all LaTeX needed to reconstruct the original source.
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \newcommand{reverseconcat}[3]{#3#2#1}
... ''')
>>> list(soup.all)
['\n', \newcommand{reverseconcat}[3]{#3#2#1}, '\n']
"""
for child in self.expr.all:
if isinstance(child, TexExpr):
node = TexNode(child)
node.parent = self
yield node
else:
yield child | [
"def",
"all",
"(",
"self",
")",
":",
"for",
"child",
"in",
"self",
".",
"expr",
".",
"all",
":",
"if",
"isinstance",
"(",
"child",
",",
"TexExpr",
")",
":",
"node",
"=",
"TexNode",
"(",
"child",
")",
"node",
".",
"parent",
"=",
"self",
"yield",
"... | r"""Returns all content in this node, regardless of whitespace or
not. This includes all LaTeX needed to reconstruct the original source.
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \newcommand{reverseconcat}[3]{#3#2#1}
... ''')
>>> list(soup.all)
['\n', \newcommand{reverseconcat}[3]{#3#2#1}, '\n'] | [
"r",
"Returns",
"all",
"content",
"in",
"this",
"node",
"regardless",
"of",
"whitespace",
"or",
"not",
".",
"This",
"includes",
"all",
"LaTeX",
"needed",
"to",
"reconstruct",
"the",
"original",
"source",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L96-L113 | train | 212,681 |
alvinwan/TexSoup | TexSoup/data.py | TexNode.children | def children(self):
r"""Immediate children of this TeX element that are valid TeX objects.
This is equivalent to contents, excluding text elements and keeping only
Tex expressions.
:return: generator of all children
:rtype: Iterator[TexExpr]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{itemize}
... Random text!
... \item Hello
... \end{itemize}''')
>>> next(soup.itemize.children)
\item Hello
<BLANKLINE>
"""
for child in self.expr.children:
node = TexNode(child)
node.parent = self
yield node | python | def children(self):
r"""Immediate children of this TeX element that are valid TeX objects.
This is equivalent to contents, excluding text elements and keeping only
Tex expressions.
:return: generator of all children
:rtype: Iterator[TexExpr]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{itemize}
... Random text!
... \item Hello
... \end{itemize}''')
>>> next(soup.itemize.children)
\item Hello
<BLANKLINE>
"""
for child in self.expr.children:
node = TexNode(child)
node.parent = self
yield node | [
"def",
"children",
"(",
"self",
")",
":",
"for",
"child",
"in",
"self",
".",
"expr",
".",
"children",
":",
"node",
"=",
"TexNode",
"(",
"child",
")",
"node",
".",
"parent",
"=",
"self",
"yield",
"node"
] | r"""Immediate children of this TeX element that are valid TeX objects.
This is equivalent to contents, excluding text elements and keeping only
Tex expressions.
:return: generator of all children
:rtype: Iterator[TexExpr]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{itemize}
... Random text!
... \item Hello
... \end{itemize}''')
>>> next(soup.itemize.children)
\item Hello
<BLANKLINE> | [
"r",
"Immediate",
"children",
"of",
"this",
"TeX",
"element",
"that",
"are",
"valid",
"TeX",
"objects",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L137-L159 | train | 212,682 |
alvinwan/TexSoup | TexSoup/data.py | TexNode.string | def string(self):
r"""This is valid if and only if
1. the expression is a :class:`.TexCmd` AND
2. the command has only one argument.
:rtype: Union[None,str]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''\textbf{Hello}''')
>>> soup.textbf.string
'Hello'
>>> soup.textbf.string = 'Hello World'
>>> soup.textbf.string
'Hello World'
>>> soup.textbf
\textbf{Hello World}
"""
if isinstance(self.expr, TexCmd) and len(self.expr.args) == 1:
return self.expr.args[0].value | python | def string(self):
r"""This is valid if and only if
1. the expression is a :class:`.TexCmd` AND
2. the command has only one argument.
:rtype: Union[None,str]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''\textbf{Hello}''')
>>> soup.textbf.string
'Hello'
>>> soup.textbf.string = 'Hello World'
>>> soup.textbf.string
'Hello World'
>>> soup.textbf
\textbf{Hello World}
"""
if isinstance(self.expr, TexCmd) and len(self.expr.args) == 1:
return self.expr.args[0].value | [
"def",
"string",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"expr",
",",
"TexCmd",
")",
"and",
"len",
"(",
"self",
".",
"expr",
".",
"args",
")",
"==",
"1",
":",
"return",
"self",
".",
"expr",
".",
"args",
"[",
"0",
"]",
".",
... | r"""This is valid if and only if
1. the expression is a :class:`.TexCmd` AND
2. the command has only one argument.
:rtype: Union[None,str]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''\textbf{Hello}''')
>>> soup.textbf.string
'Hello'
>>> soup.textbf.string = 'Hello World'
>>> soup.textbf.string
'Hello World'
>>> soup.textbf
\textbf{Hello World} | [
"r",
"This",
"is",
"valid",
"if",
"and",
"only",
"if"
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L228-L247 | train | 212,683 |
alvinwan/TexSoup | TexSoup/data.py | TexNode.text | def text(self):
r"""All text in descendant nodes.
This is equivalent to contents, keeping text elements and excluding
Tex expressions.
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{itemize}
... \begin{itemize}
... \item Nested
... \end{itemize}
... \end{itemize}''')
>>> next(soup.text)
'Nested\n '
"""
for descendant in self.contents:
if isinstance(descendant, TokenWithPosition):
yield descendant
elif hasattr(descendant, 'text'):
yield from descendant.text | python | def text(self):
r"""All text in descendant nodes.
This is equivalent to contents, keeping text elements and excluding
Tex expressions.
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{itemize}
... \begin{itemize}
... \item Nested
... \end{itemize}
... \end{itemize}''')
>>> next(soup.text)
'Nested\n '
"""
for descendant in self.contents:
if isinstance(descendant, TokenWithPosition):
yield descendant
elif hasattr(descendant, 'text'):
yield from descendant.text | [
"def",
"text",
"(",
"self",
")",
":",
"for",
"descendant",
"in",
"self",
".",
"contents",
":",
"if",
"isinstance",
"(",
"descendant",
",",
"TokenWithPosition",
")",
":",
"yield",
"descendant",
"elif",
"hasattr",
"(",
"descendant",
",",
"'text'",
")",
":",
... | r"""All text in descendant nodes.
This is equivalent to contents, keeping text elements and excluding
Tex expressions.
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \begin{itemize}
... \begin{itemize}
... \item Nested
... \end{itemize}
... \end{itemize}''')
>>> next(soup.text)
'Nested\n ' | [
"r",
"All",
"text",
"in",
"descendant",
"nodes",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L258-L278 | train | 212,684 |
alvinwan/TexSoup | TexSoup/data.py | TexNode.count | def count(self, name=None, **attrs):
r"""Number of descendants matching criteria.
:param Union[None,str] name: name of LaTeX expression
:param attrs: LaTeX expression attributes, such as item text.
:return: number of matching expressions
:rtype: int
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \section{Hey}
... \textit{Silly}
... \textit{Willy}''')
>>> soup.count('section')
1
>>> soup.count('textit')
2
"""
return len(list(self.find_all(name, **attrs))) | python | def count(self, name=None, **attrs):
r"""Number of descendants matching criteria.
:param Union[None,str] name: name of LaTeX expression
:param attrs: LaTeX expression attributes, such as item text.
:return: number of matching expressions
:rtype: int
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \section{Hey}
... \textit{Silly}
... \textit{Willy}''')
>>> soup.count('section')
1
>>> soup.count('textit')
2
"""
return len(list(self.find_all(name, **attrs))) | [
"def",
"count",
"(",
"self",
",",
"name",
"=",
"None",
",",
"*",
"*",
"attrs",
")",
":",
"return",
"len",
"(",
"list",
"(",
"self",
".",
"find_all",
"(",
"name",
",",
"*",
"*",
"attrs",
")",
")",
")"
] | r"""Number of descendants matching criteria.
:param Union[None,str] name: name of LaTeX expression
:param attrs: LaTeX expression attributes, such as item text.
:return: number of matching expressions
:rtype: int
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \section{Hey}
... \textit{Silly}
... \textit{Willy}''')
>>> soup.count('section')
1
>>> soup.count('textit')
2 | [
"r",
"Number",
"of",
"descendants",
"matching",
"criteria",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L366-L384 | train | 212,685 |
alvinwan/TexSoup | TexSoup/data.py | TexNode.delete | def delete(self):
r"""Delete this node from the parse tree.
Where applicable, this will remove all descendants of this node from
the parse tree.
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''\textit{\color{blue}{Silly}}\textit{keep me!}''')
>>> soup.textit.color.delete()
>>> soup
\textit{}\textit{keep me!}
>>> soup.textit.delete()
>>> soup
\textit{keep me!}
"""
# TODO: needs better abstraction for supports contents
parent = self.parent
if parent.expr._supports_contents():
parent.remove(self)
return
# TODO: needs abstraction for removing from arg
for arg in parent.args:
if self.expr in arg.contents:
arg.contents.remove(self.expr) | python | def delete(self):
r"""Delete this node from the parse tree.
Where applicable, this will remove all descendants of this node from
the parse tree.
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''\textit{\color{blue}{Silly}}\textit{keep me!}''')
>>> soup.textit.color.delete()
>>> soup
\textit{}\textit{keep me!}
>>> soup.textit.delete()
>>> soup
\textit{keep me!}
"""
# TODO: needs better abstraction for supports contents
parent = self.parent
if parent.expr._supports_contents():
parent.remove(self)
return
# TODO: needs abstraction for removing from arg
for arg in parent.args:
if self.expr in arg.contents:
arg.contents.remove(self.expr) | [
"def",
"delete",
"(",
"self",
")",
":",
"# TODO: needs better abstraction for supports contents",
"parent",
"=",
"self",
".",
"parent",
"if",
"parent",
".",
"expr",
".",
"_supports_contents",
"(",
")",
":",
"parent",
".",
"remove",
"(",
"self",
")",
"return",
... | r"""Delete this node from the parse tree.
Where applicable, this will remove all descendants of this node from
the parse tree.
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''\textit{\color{blue}{Silly}}\textit{keep me!}''')
>>> soup.textit.color.delete()
>>> soup
\textit{}\textit{keep me!}
>>> soup.textit.delete()
>>> soup
\textit{keep me!} | [
"r",
"Delete",
"this",
"node",
"from",
"the",
"parse",
"tree",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L386-L411 | train | 212,686 |
alvinwan/TexSoup | TexSoup/data.py | TexNode.find | def find(self, name=None, **attrs):
r"""First descendant node matching criteria.
Returns None if no descendant node found.
:return: descendant node matching criteria
:rtype: Union[None,TexExpr]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \section{Ooo}
... \textit{eee}
... \textit{ooo}''')
>>> soup.find('textit')
\textit{eee}
>>> soup.find('textbf')
"""
try:
return next(self.find_all(name, **attrs))
except StopIteration:
return None | python | def find(self, name=None, **attrs):
r"""First descendant node matching criteria.
Returns None if no descendant node found.
:return: descendant node matching criteria
:rtype: Union[None,TexExpr]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \section{Ooo}
... \textit{eee}
... \textit{ooo}''')
>>> soup.find('textit')
\textit{eee}
>>> soup.find('textbf')
"""
try:
return next(self.find_all(name, **attrs))
except StopIteration:
return None | [
"def",
"find",
"(",
"self",
",",
"name",
"=",
"None",
",",
"*",
"*",
"attrs",
")",
":",
"try",
":",
"return",
"next",
"(",
"self",
".",
"find_all",
"(",
"name",
",",
"*",
"*",
"attrs",
")",
")",
"except",
"StopIteration",
":",
"return",
"None"
] | r"""First descendant node matching criteria.
Returns None if no descendant node found.
:return: descendant node matching criteria
:rtype: Union[None,TexExpr]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \section{Ooo}
... \textit{eee}
... \textit{ooo}''')
>>> soup.find('textit')
\textit{eee}
>>> soup.find('textbf') | [
"r",
"First",
"descendant",
"node",
"matching",
"criteria",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L413-L433 | train | 212,687 |
alvinwan/TexSoup | TexSoup/data.py | TexNode.find_all | def find_all(self, name=None, **attrs):
r"""Return all descendant nodes matching criteria.
:param Union[None,str] name: name of LaTeX expression
:param attrs: LaTeX expression attributes, such as item text.
:return: All descendant nodes matching criteria
:rtype: Iterator[TexNode]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \section{Ooo}
... \textit{eee}
... \textit{ooo}''')
>>> gen = soup.find_all('textit')
>>> next(gen)
\textit{eee}
>>> next(gen)
\textit{ooo}
>>> next(soup.find_all('textbf'))
Traceback (most recent call last):
...
StopIteration
"""
for descendant in self.__descendants():
if hasattr(descendant, '__match__') and \
descendant.__match__(name, attrs):
yield descendant | python | def find_all(self, name=None, **attrs):
r"""Return all descendant nodes matching criteria.
:param Union[None,str] name: name of LaTeX expression
:param attrs: LaTeX expression attributes, such as item text.
:return: All descendant nodes matching criteria
:rtype: Iterator[TexNode]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \section{Ooo}
... \textit{eee}
... \textit{ooo}''')
>>> gen = soup.find_all('textit')
>>> next(gen)
\textit{eee}
>>> next(gen)
\textit{ooo}
>>> next(soup.find_all('textbf'))
Traceback (most recent call last):
...
StopIteration
"""
for descendant in self.__descendants():
if hasattr(descendant, '__match__') and \
descendant.__match__(name, attrs):
yield descendant | [
"def",
"find_all",
"(",
"self",
",",
"name",
"=",
"None",
",",
"*",
"*",
"attrs",
")",
":",
"for",
"descendant",
"in",
"self",
".",
"__descendants",
"(",
")",
":",
"if",
"hasattr",
"(",
"descendant",
",",
"'__match__'",
")",
"and",
"descendant",
".",
... | r"""Return all descendant nodes matching criteria.
:param Union[None,str] name: name of LaTeX expression
:param attrs: LaTeX expression attributes, such as item text.
:return: All descendant nodes matching criteria
:rtype: Iterator[TexNode]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \section{Ooo}
... \textit{eee}
... \textit{ooo}''')
>>> gen = soup.find_all('textit')
>>> next(gen)
\textit{eee}
>>> next(gen)
\textit{ooo}
>>> next(soup.find_all('textbf'))
Traceback (most recent call last):
...
StopIteration | [
"r",
"Return",
"all",
"descendant",
"nodes",
"matching",
"criteria",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L435-L461 | train | 212,688 |
alvinwan/TexSoup | TexSoup/data.py | TexExpr.all | def all(self):
r"""Returns all content in this expression, regardless of whitespace or
not. This includes all LaTeX needed to reconstruct the original source.
>>> expr1 = TexExpr('textbf', ('\n', 'hi'))
>>> expr2 = TexExpr('textbf', ('\n', 'hi'), preserve_whitespace=True)
>>> list(expr1.all) == list(expr2.all)
True
"""
for arg in self.args:
for expr in arg:
yield expr
for content in self._contents:
yield content | python | def all(self):
r"""Returns all content in this expression, regardless of whitespace or
not. This includes all LaTeX needed to reconstruct the original source.
>>> expr1 = TexExpr('textbf', ('\n', 'hi'))
>>> expr2 = TexExpr('textbf', ('\n', 'hi'), preserve_whitespace=True)
>>> list(expr1.all) == list(expr2.all)
True
"""
for arg in self.args:
for expr in arg:
yield expr
for content in self._contents:
yield content | [
"def",
"all",
"(",
"self",
")",
":",
"for",
"arg",
"in",
"self",
".",
"args",
":",
"for",
"expr",
"in",
"arg",
":",
"yield",
"expr",
"for",
"content",
"in",
"self",
".",
"_contents",
":",
"yield",
"content"
] | r"""Returns all content in this expression, regardless of whitespace or
not. This includes all LaTeX needed to reconstruct the original source.
>>> expr1 = TexExpr('textbf', ('\n', 'hi'))
>>> expr2 = TexExpr('textbf', ('\n', 'hi'), preserve_whitespace=True)
>>> list(expr1.all) == list(expr2.all)
True | [
"r",
"Returns",
"all",
"content",
"in",
"this",
"expression",
"regardless",
"of",
"whitespace",
"or",
"not",
".",
"This",
"includes",
"all",
"LaTeX",
"needed",
"to",
"reconstruct",
"the",
"original",
"source",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L582-L595 | train | 212,689 |
alvinwan/TexSoup | TexSoup/data.py | TexExpr.contents | def contents(self):
r"""Returns all contents in this expression.
Optionally includes whitespace if set when node was created.
>>> expr1 = TexExpr('textbf', ('\n', 'hi'))
>>> list(expr1.contents)
['hi']
>>> expr2 = TexExpr('textbf', ('\n', 'hi'), preserve_whitespace=True)
>>> list(expr2.contents)
['\n', 'hi']
"""
for content in self.all:
is_whitespace = isinstance(content, str) and content.isspace()
if not is_whitespace or self.preserve_whitespace:
yield content | python | def contents(self):
r"""Returns all contents in this expression.
Optionally includes whitespace if set when node was created.
>>> expr1 = TexExpr('textbf', ('\n', 'hi'))
>>> list(expr1.contents)
['hi']
>>> expr2 = TexExpr('textbf', ('\n', 'hi'), preserve_whitespace=True)
>>> list(expr2.contents)
['\n', 'hi']
"""
for content in self.all:
is_whitespace = isinstance(content, str) and content.isspace()
if not is_whitespace or self.preserve_whitespace:
yield content | [
"def",
"contents",
"(",
"self",
")",
":",
"for",
"content",
"in",
"self",
".",
"all",
":",
"is_whitespace",
"=",
"isinstance",
"(",
"content",
",",
"str",
")",
"and",
"content",
".",
"isspace",
"(",
")",
"if",
"not",
"is_whitespace",
"or",
"self",
".",... | r"""Returns all contents in this expression.
Optionally includes whitespace if set when node was created.
>>> expr1 = TexExpr('textbf', ('\n', 'hi'))
>>> list(expr1.contents)
['hi']
>>> expr2 = TexExpr('textbf', ('\n', 'hi'), preserve_whitespace=True)
>>> list(expr2.contents)
['\n', 'hi'] | [
"r",
"Returns",
"all",
"contents",
"in",
"this",
"expression",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L602-L617 | train | 212,690 |
alvinwan/TexSoup | TexSoup/data.py | TexExpr.tokens | def tokens(self):
"""Further breaks down all tokens for a particular expression into
words and other expressions.
>>> tex = TexEnv('lstlisting', ('var x = 10',))
>>> list(tex.tokens)
['var x = 10']
"""
for content in self.contents:
if isinstance(content, TokenWithPosition):
for word in content.split():
yield word
else:
yield content | python | def tokens(self):
"""Further breaks down all tokens for a particular expression into
words and other expressions.
>>> tex = TexEnv('lstlisting', ('var x = 10',))
>>> list(tex.tokens)
['var x = 10']
"""
for content in self.contents:
if isinstance(content, TokenWithPosition):
for word in content.split():
yield word
else:
yield content | [
"def",
"tokens",
"(",
"self",
")",
":",
"for",
"content",
"in",
"self",
".",
"contents",
":",
"if",
"isinstance",
"(",
"content",
",",
"TokenWithPosition",
")",
":",
"for",
"word",
"in",
"content",
".",
"split",
"(",
")",
":",
"yield",
"word",
"else",
... | Further breaks down all tokens for a particular expression into
words and other expressions.
>>> tex = TexEnv('lstlisting', ('var x = 10',))
>>> list(tex.tokens)
['var x = 10'] | [
"Further",
"breaks",
"down",
"all",
"tokens",
"for",
"a",
"particular",
"expression",
"into",
"words",
"and",
"other",
"expressions",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L620-L633 | train | 212,691 |
alvinwan/TexSoup | TexSoup/data.py | TexExpr.insert | def insert(self, i, *exprs):
"""Insert content at specified position into expression.
:param int i: Position to add content to
:param Union[TexExpr,str] exprs: List of contents to add
>>> expr = TexExpr('textbf', ('hello',))
>>> expr
TexExpr('textbf', ['hello'])
>>> expr.insert(0, 'world')
>>> expr
TexExpr('textbf', ['world', 'hello'])
"""
self._assert_supports_contents()
for j, expr in enumerate(exprs):
self._contents.insert(i + j, expr) | python | def insert(self, i, *exprs):
"""Insert content at specified position into expression.
:param int i: Position to add content to
:param Union[TexExpr,str] exprs: List of contents to add
>>> expr = TexExpr('textbf', ('hello',))
>>> expr
TexExpr('textbf', ['hello'])
>>> expr.insert(0, 'world')
>>> expr
TexExpr('textbf', ['world', 'hello'])
"""
self._assert_supports_contents()
for j, expr in enumerate(exprs):
self._contents.insert(i + j, expr) | [
"def",
"insert",
"(",
"self",
",",
"i",
",",
"*",
"exprs",
")",
":",
"self",
".",
"_assert_supports_contents",
"(",
")",
"for",
"j",
",",
"expr",
"in",
"enumerate",
"(",
"exprs",
")",
":",
"self",
".",
"_contents",
".",
"insert",
"(",
"i",
"+",
"j"... | Insert content at specified position into expression.
:param int i: Position to add content to
:param Union[TexExpr,str] exprs: List of contents to add
>>> expr = TexExpr('textbf', ('hello',))
>>> expr
TexExpr('textbf', ['hello'])
>>> expr.insert(0, 'world')
>>> expr
TexExpr('textbf', ['world', 'hello']) | [
"Insert",
"content",
"at",
"specified",
"position",
"into",
"expression",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L654-L669 | train | 212,692 |
alvinwan/TexSoup | TexSoup/data.py | TexExpr.remove | def remove(self, expr):
"""Remove a provided expression from its list of contents.
:param Union[TexExpr,str] expr: Content to add
:return: index of the expression removed
:rtype: int
>>> expr = TexExpr('textbf', ('hello',))
>>> expr.remove('hello')
0
>>> expr
TexExpr('textbf', [])
"""
self._assert_supports_contents()
index = self._contents.index(expr)
self._contents.remove(expr)
return index | python | def remove(self, expr):
"""Remove a provided expression from its list of contents.
:param Union[TexExpr,str] expr: Content to add
:return: index of the expression removed
:rtype: int
>>> expr = TexExpr('textbf', ('hello',))
>>> expr.remove('hello')
0
>>> expr
TexExpr('textbf', [])
"""
self._assert_supports_contents()
index = self._contents.index(expr)
self._contents.remove(expr)
return index | [
"def",
"remove",
"(",
"self",
",",
"expr",
")",
":",
"self",
".",
"_assert_supports_contents",
"(",
")",
"index",
"=",
"self",
".",
"_contents",
".",
"index",
"(",
"expr",
")",
"self",
".",
"_contents",
".",
"remove",
"(",
"expr",
")",
"return",
"index... | Remove a provided expression from its list of contents.
:param Union[TexExpr,str] expr: Content to add
:return: index of the expression removed
:rtype: int
>>> expr = TexExpr('textbf', ('hello',))
>>> expr.remove('hello')
0
>>> expr
TexExpr('textbf', []) | [
"Remove",
"a",
"provided",
"expression",
"from",
"its",
"list",
"of",
"contents",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L671-L687 | train | 212,693 |
alvinwan/TexSoup | TexSoup/data.py | Arg.parse | def parse(s):
"""Parse a string or list and return an Argument object
:param Union[str,iterable] s: Either a string or a list, where the first and
last elements are valid argument delimiters.
>>> Arg.parse(RArg('arg0'))
RArg('arg0')
>>> Arg.parse('[arg0]')
OArg('arg0')
"""
if isinstance(s, arg_type):
return s
if isinstance(s, (list, tuple)):
for arg in arg_type:
if [s[0], s[-1]] == arg.delims():
return arg(*s[1:-1])
raise TypeError('Malformed argument. First and last elements must '
'match a valid argument format. In this case, TexSoup'
' could not find matching punctuation for: %s.\n'
'Common issues include: Unescaped special characters,'
' mistyped closing punctuation, misalignment.' % (str(s)))
for arg in arg_type:
if arg.__is__(s):
return arg(arg.__strip__(s))
raise TypeError('Malformed argument. Must be an Arg or a string in '
'either brackets or curly braces.') | python | def parse(s):
"""Parse a string or list and return an Argument object
:param Union[str,iterable] s: Either a string or a list, where the first and
last elements are valid argument delimiters.
>>> Arg.parse(RArg('arg0'))
RArg('arg0')
>>> Arg.parse('[arg0]')
OArg('arg0')
"""
if isinstance(s, arg_type):
return s
if isinstance(s, (list, tuple)):
for arg in arg_type:
if [s[0], s[-1]] == arg.delims():
return arg(*s[1:-1])
raise TypeError('Malformed argument. First and last elements must '
'match a valid argument format. In this case, TexSoup'
' could not find matching punctuation for: %s.\n'
'Common issues include: Unescaped special characters,'
' mistyped closing punctuation, misalignment.' % (str(s)))
for arg in arg_type:
if arg.__is__(s):
return arg(arg.__strip__(s))
raise TypeError('Malformed argument. Must be an Arg or a string in '
'either brackets or curly braces.') | [
"def",
"parse",
"(",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"arg_type",
")",
":",
"return",
"s",
"if",
"isinstance",
"(",
"s",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"arg",
"in",
"arg_type",
":",
"if",
"[",
"s",
"[",
... | Parse a string or list and return an Argument object
:param Union[str,iterable] s: Either a string or a list, where the first and
last elements are valid argument delimiters.
>>> Arg.parse(RArg('arg0'))
RArg('arg0')
>>> Arg.parse('[arg0]')
OArg('arg0') | [
"Parse",
"a",
"string",
"or",
"list",
"and",
"return",
"an",
"Argument",
"object"
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L849-L875 | train | 212,694 |
alvinwan/TexSoup | TexSoup/data.py | TexArgs.insert | def insert(self, i, arg):
r"""Insert whitespace, an unparsed argument string, or an argument
object.
:param int i: Index to insert argument into
:param Arg arg: Argument to insert
>>> arguments = TexArgs(['\n', RArg('arg0'), '[arg2]'])
>>> arguments.insert(1, '[arg1]')
>>> len(arguments)
3
>>> arguments
[RArg('arg0'), OArg('arg1'), OArg('arg2')]
>>> arguments.all
['\n', RArg('arg0'), OArg('arg1'), OArg('arg2')]
>>> arguments.insert(10, '[arg3]')
>>> arguments[3]
OArg('arg3')
"""
arg = self.__coerce(arg)
if isinstance(arg, Arg):
super().insert(i, arg)
if len(self) <= 1:
self.all.append(arg)
else:
if i > len(self):
i = len(self) - 1
before = self[i - 1]
index_before = self.all.index(before)
self.all.insert(index_before + 1, arg) | python | def insert(self, i, arg):
r"""Insert whitespace, an unparsed argument string, or an argument
object.
:param int i: Index to insert argument into
:param Arg arg: Argument to insert
>>> arguments = TexArgs(['\n', RArg('arg0'), '[arg2]'])
>>> arguments.insert(1, '[arg1]')
>>> len(arguments)
3
>>> arguments
[RArg('arg0'), OArg('arg1'), OArg('arg2')]
>>> arguments.all
['\n', RArg('arg0'), OArg('arg1'), OArg('arg2')]
>>> arguments.insert(10, '[arg3]')
>>> arguments[3]
OArg('arg3')
"""
arg = self.__coerce(arg)
if isinstance(arg, Arg):
super().insert(i, arg)
if len(self) <= 1:
self.all.append(arg)
else:
if i > len(self):
i = len(self) - 1
before = self[i - 1]
index_before = self.all.index(before)
self.all.insert(index_before + 1, arg) | [
"def",
"insert",
"(",
"self",
",",
"i",
",",
"arg",
")",
":",
"arg",
"=",
"self",
".",
"__coerce",
"(",
"arg",
")",
"if",
"isinstance",
"(",
"arg",
",",
"Arg",
")",
":",
"super",
"(",
")",
".",
"insert",
"(",
"i",
",",
"arg",
")",
"if",
"len"... | r"""Insert whitespace, an unparsed argument string, or an argument
object.
:param int i: Index to insert argument into
:param Arg arg: Argument to insert
>>> arguments = TexArgs(['\n', RArg('arg0'), '[arg2]'])
>>> arguments.insert(1, '[arg1]')
>>> len(arguments)
3
>>> arguments
[RArg('arg0'), OArg('arg1'), OArg('arg2')]
>>> arguments.all
['\n', RArg('arg0'), OArg('arg1'), OArg('arg2')]
>>> arguments.insert(10, '[arg3]')
>>> arguments[3]
OArg('arg3') | [
"r",
"Insert",
"whitespace",
"an",
"unparsed",
"argument",
"string",
"or",
"an",
"argument",
"object",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L985-L1017 | train | 212,695 |
alvinwan/TexSoup | TexSoup/data.py | TexArgs.remove | def remove(self, item):
"""Remove either an unparsed argument string or an argument object.
:param Union[str,Arg] item: Item to remove
>>> arguments = TexArgs([RArg('arg0'), '[arg2]', '{arg3}'])
>>> arguments.remove('{arg0}')
>>> len(arguments)
2
>>> arguments[0]
OArg('arg2')
"""
item = self.__coerce(item)
self.all.remove(item)
super().remove(item) | python | def remove(self, item):
"""Remove either an unparsed argument string or an argument object.
:param Union[str,Arg] item: Item to remove
>>> arguments = TexArgs([RArg('arg0'), '[arg2]', '{arg3}'])
>>> arguments.remove('{arg0}')
>>> len(arguments)
2
>>> arguments[0]
OArg('arg2')
"""
item = self.__coerce(item)
self.all.remove(item)
super().remove(item) | [
"def",
"remove",
"(",
"self",
",",
"item",
")",
":",
"item",
"=",
"self",
".",
"__coerce",
"(",
"item",
")",
"self",
".",
"all",
".",
"remove",
"(",
"item",
")",
"super",
"(",
")",
".",
"remove",
"(",
"item",
")"
] | Remove either an unparsed argument string or an argument object.
:param Union[str,Arg] item: Item to remove
>>> arguments = TexArgs([RArg('arg0'), '[arg2]', '{arg3}'])
>>> arguments.remove('{arg0}')
>>> len(arguments)
2
>>> arguments[0]
OArg('arg2') | [
"Remove",
"either",
"an",
"unparsed",
"argument",
"string",
"or",
"an",
"argument",
"object",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L1019-L1033 | train | 212,696 |
alvinwan/TexSoup | TexSoup/data.py | TexArgs.pop | def pop(self, i):
"""Pop argument object at provided index.
:param int i: Index to pop from the list
>>> arguments = TexArgs([RArg('arg0'), '[arg2]', '{arg3}'])
>>> arguments.pop(1)
OArg('arg2')
>>> len(arguments)
2
>>> arguments[0]
RArg('arg0')
"""
item = super().pop(i)
j = self.all.index(item)
return self.all.pop(j) | python | def pop(self, i):
"""Pop argument object at provided index.
:param int i: Index to pop from the list
>>> arguments = TexArgs([RArg('arg0'), '[arg2]', '{arg3}'])
>>> arguments.pop(1)
OArg('arg2')
>>> len(arguments)
2
>>> arguments[0]
RArg('arg0')
"""
item = super().pop(i)
j = self.all.index(item)
return self.all.pop(j) | [
"def",
"pop",
"(",
"self",
",",
"i",
")",
":",
"item",
"=",
"super",
"(",
")",
".",
"pop",
"(",
"i",
")",
"j",
"=",
"self",
".",
"all",
".",
"index",
"(",
"item",
")",
"return",
"self",
".",
"all",
".",
"pop",
"(",
"j",
")"
] | Pop argument object at provided index.
:param int i: Index to pop from the list
>>> arguments = TexArgs([RArg('arg0'), '[arg2]', '{arg3}'])
>>> arguments.pop(1)
OArg('arg2')
>>> len(arguments)
2
>>> arguments[0]
RArg('arg0') | [
"Pop",
"argument",
"object",
"at",
"provided",
"index",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L1035-L1050 | train | 212,697 |
alvinwan/TexSoup | TexSoup/utils.py | Buffer.forward | def forward(self, j=1):
"""Move forward by j steps.
>>> b = Buffer('abcdef')
>>> b.forward(3)
'abc'
>>> b.forward(-2)
'bc'
"""
if j < 0:
return self.backward(-j)
self.__i += j
return self[self.__i-j:self.__i] | python | def forward(self, j=1):
"""Move forward by j steps.
>>> b = Buffer('abcdef')
>>> b.forward(3)
'abc'
>>> b.forward(-2)
'bc'
"""
if j < 0:
return self.backward(-j)
self.__i += j
return self[self.__i-j:self.__i] | [
"def",
"forward",
"(",
"self",
",",
"j",
"=",
"1",
")",
":",
"if",
"j",
"<",
"0",
":",
"return",
"self",
".",
"backward",
"(",
"-",
"j",
")",
"self",
".",
"__i",
"+=",
"j",
"return",
"self",
"[",
"self",
".",
"__i",
"-",
"j",
":",
"self",
"... | Move forward by j steps.
>>> b = Buffer('abcdef')
>>> b.forward(3)
'abc'
>>> b.forward(-2)
'bc' | [
"Move",
"forward",
"by",
"j",
"steps",
"."
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/utils.py#L279-L291 | train | 212,698 |
alvinwan/TexSoup | TexSoup/tex.py | read | def read(tex):
"""Read and parse all LaTeX source
:param Union[str,iterable] tex: LaTeX source
:return TexEnv: the global environment
"""
if isinstance(tex, str):
tex = tex
else:
tex = ''.join(itertools.chain(*tex))
buf, children = Buffer(tokenize(tex)), []
while buf.hasNext():
content = read_tex(buf)
if content is not None:
children.append(content)
return TexEnv('[tex]', children), tex | python | def read(tex):
"""Read and parse all LaTeX source
:param Union[str,iterable] tex: LaTeX source
:return TexEnv: the global environment
"""
if isinstance(tex, str):
tex = tex
else:
tex = ''.join(itertools.chain(*tex))
buf, children = Buffer(tokenize(tex)), []
while buf.hasNext():
content = read_tex(buf)
if content is not None:
children.append(content)
return TexEnv('[tex]', children), tex | [
"def",
"read",
"(",
"tex",
")",
":",
"if",
"isinstance",
"(",
"tex",
",",
"str",
")",
":",
"tex",
"=",
"tex",
"else",
":",
"tex",
"=",
"''",
".",
"join",
"(",
"itertools",
".",
"chain",
"(",
"*",
"tex",
")",
")",
"buf",
",",
"children",
"=",
... | Read and parse all LaTeX source
:param Union[str,iterable] tex: LaTeX source
:return TexEnv: the global environment | [
"Read",
"and",
"parse",
"all",
"LaTeX",
"source"
] | 63323ed71510fd2351102b8c36660a3b7703cead | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/tex.py#L7-L22 | train | 212,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.