body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
def deleteAttr(self, attr):
'Delete `attr` from node\n\n Arguments:\n attr (Plug): Attribute to remove\n\n Example:\n >>> node = createNode("transform")\n >>> node["myAttr"] = Double()\n >>> node.deleteAttr("myAttr")\n >>> node.hasAttr("myAttr")\n False\n\n '
if (not isinstance(attr, Plug)):
attr = self[attr]
attribute = attr._mplug.attribute()
self._fn.removeAttribute(attribute)
| 4,967,128,804,810,269,000
|
Delete `attr` from node
Arguments:
attr (Plug): Attribute to remove
Example:
>>> node = createNode("transform")
>>> node["myAttr"] = Double()
>>> node.deleteAttr("myAttr")
>>> node.hasAttr("myAttr")
False
|
cmdx.py
|
deleteAttr
|
fvbehr/cmdx
|
python
|
def deleteAttr(self, attr):
'Delete `attr` from node\n\n Arguments:\n attr (Plug): Attribute to remove\n\n Example:\n >>> node = createNode("transform")\n >>> node["myAttr"] = Double()\n >>> node.deleteAttr("myAttr")\n >>> node.hasAttr("myAttr")\n False\n\n '
if (not isinstance(attr, Plug)):
attr = self[attr]
attribute = attr._mplug.attribute()
self._fn.removeAttribute(attribute)
|
def connections(self, type=None, unit=None, plugs=False):
'Yield plugs of node with a connection to any other plug\n\n Arguments:\n unit (int, optional): Return plug in this unit,\n e.g. Meters or Radians\n type (str, optional): Restrict output to nodes of this type,\n e.g. "transform" or "mesh"\n plugs (bool, optional): Return plugs, rather than nodes\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> a = createNode("transform", name="A")\n >>> b = createNode("multDoubleLinear", name="B")\n >>> a["ihi"] << b["ihi"]\n >>> list(a.connections()) == [b]\n True\n >>> list(b.connections()) == [a]\n True\n >>> a.connection() == b\n True\n\n '
for plug in self._fn.getConnections():
mobject = plug.node()
node = Node(mobject)
if ((not type) or (type == node._fn.typeName)):
plug = Plug(node, plug, unit)
for connection in plug.connections(plugs=plugs):
(yield connection)
| -953,114,406,584,097,200
|
Yield plugs of node with a connection to any other plug
Arguments:
unit (int, optional): Return plug in this unit,
e.g. Meters or Radians
type (str, optional): Restrict output to nodes of this type,
e.g. "transform" or "mesh"
plugs (bool, optional): Return plugs, rather than nodes
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", name="A")
>>> b = createNode("multDoubleLinear", name="B")
>>> a["ihi"] << b["ihi"]
>>> list(a.connections()) == [b]
True
>>> list(b.connections()) == [a]
True
>>> a.connection() == b
True
|
cmdx.py
|
connections
|
fvbehr/cmdx
|
python
|
def connections(self, type=None, unit=None, plugs=False):
'Yield plugs of node with a connection to any other plug\n\n Arguments:\n unit (int, optional): Return plug in this unit,\n e.g. Meters or Radians\n type (str, optional): Restrict output to nodes of this type,\n e.g. "transform" or "mesh"\n plugs (bool, optional): Return plugs, rather than nodes\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> a = createNode("transform", name="A")\n >>> b = createNode("multDoubleLinear", name="B")\n >>> a["ihi"] << b["ihi"]\n >>> list(a.connections()) == [b]\n True\n >>> list(b.connections()) == [a]\n True\n >>> a.connection() == b\n True\n\n '
for plug in self._fn.getConnections():
mobject = plug.node()
node = Node(mobject)
if ((not type) or (type == node._fn.typeName)):
plug = Plug(node, plug, unit)
for connection in plug.connections(plugs=plugs):
(yield connection)
|
def connection(self, type=None, unit=None, plug=False):
'Singular version of :func:`connections()`'
return next(self.connections(type, unit, plug), None)
| -1,656,226,878,386,424,600
|
Singular version of :func:`connections()`
|
cmdx.py
|
connection
|
fvbehr/cmdx
|
python
|
def connection(self, type=None, unit=None, plug=False):
return next(self.connections(type, unit, plug), None)
|
@protected
def path(self):
'Return full path to node\n\n Example:\n >>> parent = createNode("transform", "myParent")\n >>> child = createNode("transform", "myChild", parent=parent)\n >>> child.name()\n u\'myChild\'\n >>> child.path()\n u\'|myParent|myChild\'\n\n '
return self._fn.fullPathName()
| -6,994,904,142,941,890,000
|
Return full path to node
Example:
>>> parent = createNode("transform", "myParent")
>>> child = createNode("transform", "myChild", parent=parent)
>>> child.name()
u'myChild'
>>> child.path()
u'|myParent|myChild'
|
cmdx.py
|
path
|
fvbehr/cmdx
|
python
|
@protected
def path(self):
'Return full path to node\n\n Example:\n >>> parent = createNode("transform", "myParent")\n >>> child = createNode("transform", "myChild", parent=parent)\n >>> child.name()\n u\'myChild\'\n >>> child.path()\n u\'|myParent|myChild\'\n\n '
return self._fn.fullPathName()
|
@protected
def dagPath(self):
'Return a om.MDagPath for this node\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> parent = createNode("transform", name="Parent")\n >>> child = createNode("transform", name="Child", parent=parent)\n >>> path = child.dagPath()\n >>> str(path)\n \'Child\'\n >>> str(path.pop())\n \'Parent\'\n\n '
return om.MDagPath.getAPathTo(self._mobject)
| 4,291,435,113,795,996,700
|
Return a om.MDagPath for this node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> parent = createNode("transform", name="Parent")
>>> child = createNode("transform", name="Child", parent=parent)
>>> path = child.dagPath()
>>> str(path)
'Child'
>>> str(path.pop())
'Parent'
|
cmdx.py
|
dagPath
|
fvbehr/cmdx
|
python
|
@protected
def dagPath(self):
'Return a om.MDagPath for this node\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> parent = createNode("transform", name="Parent")\n >>> child = createNode("transform", name="Child", parent=parent)\n >>> path = child.dagPath()\n >>> str(path)\n \'Child\'\n >>> str(path.pop())\n \'Parent\'\n\n '
return om.MDagPath.getAPathTo(self._mobject)
|
@protected
def shortestPath(self):
'Return shortest unique path to node\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> parent = createNode("transform", name="myParent")\n >>> child = createNode("transform", name="myChild", parent=parent)\n >>> child.shortestPath()\n u\'myChild\'\n >>> child = createNode("transform", name="myChild")\n >>> # Now `myChild` could refer to more than a single node\n >>> child.shortestPath()\n u\'|myChild\'\n\n '
return self._fn.partialPathName()
| 5,479,381,250,253,778,000
|
Return shortest unique path to node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> parent = createNode("transform", name="myParent")
>>> child = createNode("transform", name="myChild", parent=parent)
>>> child.shortestPath()
u'myChild'
>>> child = createNode("transform", name="myChild")
>>> # Now `myChild` could refer to more than a single node
>>> child.shortestPath()
u'|myChild'
|
cmdx.py
|
shortestPath
|
fvbehr/cmdx
|
python
|
@protected
def shortestPath(self):
'Return shortest unique path to node\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> parent = createNode("transform", name="myParent")\n >>> child = createNode("transform", name="myChild", parent=parent)\n >>> child.shortestPath()\n u\'myChild\'\n >>> child = createNode("transform", name="myChild")\n >>> # Now `myChild` could refer to more than a single node\n >>> child.shortestPath()\n u\'|myChild\'\n\n '
return self._fn.partialPathName()
|
@property
def level(self):
'Return the number of parents this DAG node has\n\n Example:\n >>> parent = createNode("transform")\n >>> child = createNode("transform", parent=parent)\n >>> child.level\n 1\n >>> parent.level\n 0\n\n '
return (self.path().count('|') - 1)
| 4,354,758,280,728,383,000
|
Return the number of parents this DAG node has
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> child.level
1
>>> parent.level
0
|
cmdx.py
|
level
|
fvbehr/cmdx
|
python
|
@property
def level(self):
'Return the number of parents this DAG node has\n\n Example:\n >>> parent = createNode("transform")\n >>> child = createNode("transform", parent=parent)\n >>> child.level\n 1\n >>> parent.level\n 0\n\n '
return (self.path().count('|') - 1)
|
@property
def boundingBox(self):
'Return a cmdx.BoundingBox of this DAG node'
return BoundingBox(self._fn.boundingBox)
| 5,388,794,584,110,900,000
|
Return a cmdx.BoundingBox of this DAG node
|
cmdx.py
|
boundingBox
|
fvbehr/cmdx
|
python
|
@property
def boundingBox(self):
return BoundingBox(self._fn.boundingBox)
|
def hide(self):
'Set visibility to False'
self['visibility'] = False
| -7,527,321,505,382,108,000
|
Set visibility to False
|
cmdx.py
|
hide
|
fvbehr/cmdx
|
python
|
def hide(self):
self['visibility'] = False
|
def show(self):
'Set visibility to True'
self['visibility'] = True
| -7,172,749,785,531,764,000
|
Set visibility to True
|
cmdx.py
|
show
|
fvbehr/cmdx
|
python
|
def show(self):
self['visibility'] = True
|
def addChild(self, child, index=Last):
'Add `child` to self\n\n Arguments:\n child (Node): Child to add\n index (int, optional): Physical location in hierarchy,\n defaults to cmdx.Last\n\n Example:\n >>> parent = createNode("transform")\n >>> child = createNode("transform")\n >>> parent.addChild(child)\n\n '
mobject = child._mobject
self._fn.addChild(mobject, index)
| -4,976,683,699,214,987,000
|
Add `child` to self
Arguments:
child (Node): Child to add
index (int, optional): Physical location in hierarchy,
defaults to cmdx.Last
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform")
>>> parent.addChild(child)
|
cmdx.py
|
addChild
|
fvbehr/cmdx
|
python
|
def addChild(self, child, index=Last):
'Add `child` to self\n\n Arguments:\n child (Node): Child to add\n index (int, optional): Physical location in hierarchy,\n defaults to cmdx.Last\n\n Example:\n >>> parent = createNode("transform")\n >>> child = createNode("transform")\n >>> parent.addChild(child)\n\n '
mobject = child._mobject
self._fn.addChild(mobject, index)
|
def assembly(self):
'Return the top-level parent of node\n\n Example:\n >>> parent1 = createNode("transform")\n >>> parent2 = createNode("transform")\n >>> child = createNode("transform", parent=parent1)\n >>> grandchild = createNode("transform", parent=child)\n >>> child.assembly() == parent1\n True\n >>> parent2.assembly() == parent2\n True\n\n '
path = self._fn.getPath()
root = None
for level in range((path.length() - 1)):
root = path.pop()
return (self.__class__(root.node()) if root else self)
| -5,640,171,306,117,309,000
|
Return the top-level parent of node
Example:
>>> parent1 = createNode("transform")
>>> parent2 = createNode("transform")
>>> child = createNode("transform", parent=parent1)
>>> grandchild = createNode("transform", parent=child)
>>> child.assembly() == parent1
True
>>> parent2.assembly() == parent2
True
|
cmdx.py
|
assembly
|
fvbehr/cmdx
|
python
|
def assembly(self):
'Return the top-level parent of node\n\n Example:\n >>> parent1 = createNode("transform")\n >>> parent2 = createNode("transform")\n >>> child = createNode("transform", parent=parent1)\n >>> grandchild = createNode("transform", parent=child)\n >>> child.assembly() == parent1\n True\n >>> parent2.assembly() == parent2\n True\n\n '
path = self._fn.getPath()
root = None
for level in range((path.length() - 1)):
root = path.pop()
return (self.__class__(root.node()) if root else self)
|
def transform(self, space=sObject, time=None):
'Return TransformationMatrix'
plug = (self['worldMatrix'][0] if (space == sWorld) else self['matrix'])
return TransformationMatrix(plug.asMatrix(time))
| -7,353,854,567,979,109,000
|
Return TransformationMatrix
|
cmdx.py
|
transform
|
fvbehr/cmdx
|
python
|
def transform(self, space=sObject, time=None):
plug = (self['worldMatrix'][0] if (space == sWorld) else self['matrix'])
return TransformationMatrix(plug.asMatrix(time))
|
def mapFrom(self, other, time=None):
'Return TransformationMatrix of `other` relative self\n\n Example:\n >>> a = createNode("transform")\n >>> b = createNode("transform")\n >>> a["translate"] = (0, 5, 0)\n >>> b["translate"] = (0, -5, 0)\n >>> delta = a.mapFrom(b)\n >>> delta.translation()[1]\n 10.0\n >>> a = createNode("transform")\n >>> b = createNode("transform")\n >>> a["translate"] = (0, 5, 0)\n >>> b["translate"] = (0, -15, 0)\n >>> delta = a.mapFrom(b)\n >>> delta.translation()[1]\n 20.0\n\n '
a = self['worldMatrix'][0].asMatrix(time)
b = other['worldInverseMatrix'][0].asMatrix(time)
delta = (a * b)
return TransformationMatrix(delta)
| -8,735,090,727,095,845,000
|
Return TransformationMatrix of `other` relative self
Example:
>>> a = createNode("transform")
>>> b = createNode("transform")
>>> a["translate"] = (0, 5, 0)
>>> b["translate"] = (0, -5, 0)
>>> delta = a.mapFrom(b)
>>> delta.translation()[1]
10.0
>>> a = createNode("transform")
>>> b = createNode("transform")
>>> a["translate"] = (0, 5, 0)
>>> b["translate"] = (0, -15, 0)
>>> delta = a.mapFrom(b)
>>> delta.translation()[1]
20.0
|
cmdx.py
|
mapFrom
|
fvbehr/cmdx
|
python
|
def mapFrom(self, other, time=None):
'Return TransformationMatrix of `other` relative self\n\n Example:\n >>> a = createNode("transform")\n >>> b = createNode("transform")\n >>> a["translate"] = (0, 5, 0)\n >>> b["translate"] = (0, -5, 0)\n >>> delta = a.mapFrom(b)\n >>> delta.translation()[1]\n 10.0\n >>> a = createNode("transform")\n >>> b = createNode("transform")\n >>> a["translate"] = (0, 5, 0)\n >>> b["translate"] = (0, -15, 0)\n >>> delta = a.mapFrom(b)\n >>> delta.translation()[1]\n 20.0\n\n '
a = self['worldMatrix'][0].asMatrix(time)
b = other['worldInverseMatrix'][0].asMatrix(time)
delta = (a * b)
return TransformationMatrix(delta)
|
def mapTo(self, other, time=None):
'Return TransformationMatrix of self relative `other`\n\n See :func:`mapFrom` for examples.\n\n '
return other.mapFrom(self, time)
| -7,217,237,857,843,995,000
|
Return TransformationMatrix of self relative `other`
See :func:`mapFrom` for examples.
|
cmdx.py
|
mapTo
|
fvbehr/cmdx
|
python
|
def mapTo(self, other, time=None):
'Return TransformationMatrix of self relative `other`\n\n See :func:`mapFrom` for examples.\n\n '
return other.mapFrom(self, time)
|
def parent(self, type=None):
'Return parent of node\n\n Arguments:\n type (str, optional): Return parent, only if it matches this type\n\n Example:\n >>> parent = createNode("transform")\n >>> child = createNode("transform", parent=parent)\n >>> child.parent() == parent\n True\n >>> not child.parent(type="camera")\n True\n >>> parent.parent()\n\n '
mobject = self._fn.parent(0)
if (mobject.apiType() == om.MFn.kWorld):
return
cls = self.__class__
if ((not type) or (type == self._fn.__class__(mobject).typeName)):
return cls(mobject)
| 1,257,059,951,428,212,500
|
Return parent of node
Arguments:
type (str, optional): Return parent, only if it matches this type
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> child.parent() == parent
True
>>> not child.parent(type="camera")
True
>>> parent.parent()
|
cmdx.py
|
parent
|
fvbehr/cmdx
|
python
|
def parent(self, type=None):
'Return parent of node\n\n Arguments:\n type (str, optional): Return parent, only if it matches this type\n\n Example:\n >>> parent = createNode("transform")\n >>> child = createNode("transform", parent=parent)\n >>> child.parent() == parent\n True\n >>> not child.parent(type="camera")\n True\n >>> parent.parent()\n\n '
mobject = self._fn.parent(0)
if (mobject.apiType() == om.MFn.kWorld):
return
cls = self.__class__
if ((not type) or (type == self._fn.__class__(mobject).typeName)):
return cls(mobject)
|
def children(self, type=None, filter=om.MFn.kTransform, query=None, contains=None):
'Return children of node\n\n All returned children are transform nodes, as specified by the\n `filter` argument. For shapes, use the :func:`shapes` method.\n The `contains` argument only returns transform nodes containing\n a shape of the type provided.\n\n Arguments:\n type (str, optional): Return only children that match this type\n filter (int, optional): Return only children with this function set\n contains (str, optional): Child must have a shape of this type\n query (dict, optional): Limit output to nodes with these attributes\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> a = createNode("transform", "a")\n >>> b = createNode("transform", "b", parent=a)\n >>> c = createNode("transform", "c", parent=a)\n >>> d = createNode("mesh", "d", parent=c)\n >>> list(a.children()) == [b, c]\n True\n >>> a.child() == b\n True\n >>> c.child(type="mesh")\n >>> c.child(type="mesh", filter=None) == d\n True\n >>> c.child(type=("mesh", "transform"), filter=None) == d\n True\n >>> a.child() == b\n True\n >>> a.child(contains="mesh") == c\n True\n >>> a.child(contains="nurbsCurve") is None\n True\n >>> b["myAttr"] = Double(default=5)\n >>> a.child(query=["myAttr"]) == b\n True\n >>> a.child(query=["noExist"]) is None\n True\n >>> a.child(query={"myAttr": 5}) == b\n True\n >>> a.child(query={"myAttr": 1}) is None\n True\n\n '
if self.isA(kShape):
return
cls = DagNode
Fn = self._fn.__class__
op = operator.eq
if isinstance(type, (tuple, list)):
op = operator.contains
other = ('typeId' if isinstance(type, om.MTypeId) else 'typeName')
for index in range(self._fn.childCount()):
try:
mobject = self._fn.child(index)
except RuntimeError:
log.warning(('Child %d of %s not found, this is a bug' % (index, self)))
raise
if ((filter is not None) and (not mobject.hasFn(filter))):
continue
if ((not type) or op(type, getattr(Fn(mobject), other))):
node = cls(mobject)
if ((not contains) or node.shape(type=contains)):
if (query is None):
(yield node)
elif isinstance(query, dict):
try:
if all(((node[key] == value) for (key, value) in query.items())):
(yield node)
except ExistError:
continue
elif all(((key in node) for key in query)):
(yield node)
| -6,300,038,422,551,337,000
|
Return children of node
All returned children are transform nodes, as specified by the
`filter` argument. For shapes, use the :func:`shapes` method.
The `contains` argument only returns transform nodes containing
a shape of the type provided.
Arguments:
type (str, optional): Return only children that match this type
filter (int, optional): Return only children with this function set
contains (str, optional): Child must have a shape of this type
query (dict, optional): Limit output to nodes with these attributes
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", "a")
>>> b = createNode("transform", "b", parent=a)
>>> c = createNode("transform", "c", parent=a)
>>> d = createNode("mesh", "d", parent=c)
>>> list(a.children()) == [b, c]
True
>>> a.child() == b
True
>>> c.child(type="mesh")
>>> c.child(type="mesh", filter=None) == d
True
>>> c.child(type=("mesh", "transform"), filter=None) == d
True
>>> a.child() == b
True
>>> a.child(contains="mesh") == c
True
>>> a.child(contains="nurbsCurve") is None
True
>>> b["myAttr"] = Double(default=5)
>>> a.child(query=["myAttr"]) == b
True
>>> a.child(query=["noExist"]) is None
True
>>> a.child(query={"myAttr": 5}) == b
True
>>> a.child(query={"myAttr": 1}) is None
True
|
cmdx.py
|
children
|
fvbehr/cmdx
|
python
|
def children(self, type=None, filter=om.MFn.kTransform, query=None, contains=None):
'Return children of node\n\n All returned children are transform nodes, as specified by the\n `filter` argument. For shapes, use the :func:`shapes` method.\n The `contains` argument only returns transform nodes containing\n a shape of the type provided.\n\n Arguments:\n type (str, optional): Return only children that match this type\n filter (int, optional): Return only children with this function set\n contains (str, optional): Child must have a shape of this type\n query (dict, optional): Limit output to nodes with these attributes\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> a = createNode("transform", "a")\n >>> b = createNode("transform", "b", parent=a)\n >>> c = createNode("transform", "c", parent=a)\n >>> d = createNode("mesh", "d", parent=c)\n >>> list(a.children()) == [b, c]\n True\n >>> a.child() == b\n True\n >>> c.child(type="mesh")\n >>> c.child(type="mesh", filter=None) == d\n True\n >>> c.child(type=("mesh", "transform"), filter=None) == d\n True\n >>> a.child() == b\n True\n >>> a.child(contains="mesh") == c\n True\n >>> a.child(contains="nurbsCurve") is None\n True\n >>> b["myAttr"] = Double(default=5)\n >>> a.child(query=["myAttr"]) == b\n True\n >>> a.child(query=["noExist"]) is None\n True\n >>> a.child(query={"myAttr": 5}) == b\n True\n >>> a.child(query={"myAttr": 1}) is None\n True\n\n '
if self.isA(kShape):
return
cls = DagNode
Fn = self._fn.__class__
op = operator.eq
if isinstance(type, (tuple, list)):
op = operator.contains
other = ('typeId' if isinstance(type, om.MTypeId) else 'typeName')
for index in range(self._fn.childCount()):
try:
mobject = self._fn.child(index)
except RuntimeError:
log.warning(('Child %d of %s not found, this is a bug' % (index, self)))
raise
if ((filter is not None) and (not mobject.hasFn(filter))):
continue
if ((not type) or op(type, getattr(Fn(mobject), other))):
node = cls(mobject)
if ((not contains) or node.shape(type=contains)):
if (query is None):
(yield node)
elif isinstance(query, dict):
try:
if all(((node[key] == value) for (key, value) in query.items())):
(yield node)
except ExistError:
continue
elif all(((key in node) for key in query)):
(yield node)
|
def descendent(self, type=om.MFn.kInvalid):
'Singular version of :func:`descendents()`\n\n A recursive, depth-first search.\n\n .. code-block:: python\n\n a\n |\n b---d\n | |\n c e\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> a = createNode("transform", "a")\n >>> b = createNode("transform", "b", parent=a)\n >>> c = createNode("transform", "c", parent=b)\n >>> d = createNode("transform", "d", parent=b)\n >>> e = createNode("transform", "e", parent=d)\n >>> a.descendent() == a.child()\n True\n >>> list(a.descendents()) == [b, c, d, e]\n True\n >>> f = createNode("mesh", "f", parent=e)\n >>> list(a.descendents(type="mesh")) == [f]\n True\n\n '
return next(self.descendents(type), None)
| -2,679,626,154,566,718,000
|
Singular version of :func:`descendents()`
A recursive, depth-first search.
.. code-block:: python
a
|
b---d
| |
c e
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", "a")
>>> b = createNode("transform", "b", parent=a)
>>> c = createNode("transform", "c", parent=b)
>>> d = createNode("transform", "d", parent=b)
>>> e = createNode("transform", "e", parent=d)
>>> a.descendent() == a.child()
True
>>> list(a.descendents()) == [b, c, d, e]
True
>>> f = createNode("mesh", "f", parent=e)
>>> list(a.descendents(type="mesh")) == [f]
True
|
cmdx.py
|
descendent
|
fvbehr/cmdx
|
python
|
def descendent(self, type=om.MFn.kInvalid):
'Singular version of :func:`descendents()`\n\n A recursive, depth-first search.\n\n .. code-block:: python\n\n a\n |\n b---d\n | |\n c e\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> a = createNode("transform", "a")\n >>> b = createNode("transform", "b", parent=a)\n >>> c = createNode("transform", "c", parent=b)\n >>> d = createNode("transform", "d", parent=b)\n >>> e = createNode("transform", "e", parent=d)\n >>> a.descendent() == a.child()\n True\n >>> list(a.descendents()) == [b, c, d, e]\n True\n >>> f = createNode("mesh", "f", parent=e)\n >>> list(a.descendents(type="mesh")) == [f]\n True\n\n '
return next(self.descendents(type), None)
|
def duplicate(self):
'Return a duplicate of self'
return self.__class__(self._fn.duplicate())
| -4,786,223,266,813,627,000
|
Return a duplicate of self
|
cmdx.py
|
duplicate
|
fvbehr/cmdx
|
python
|
def duplicate(self):
return self.__class__(self._fn.duplicate())
|
def clone(self, name=None, parent=None, worldspace=False):
'Return a clone of self\n\n A "clone" assignes the .outMesh attribute of a mesh node\n to the `.inMesh` of the resulting clone.\n\n Supports:\n - mesh\n\n Arguments:\n name (str, optional): Name of newly created clone\n parent (DagNode, optional): Parent to newly cloned node\n worldspace (bool, optional): Translate output to worldspace\n\n '
if (self.isA(kShape) and (self.typeName == 'mesh')):
assert (parent is not None), 'mesh cloning requires parent argument'
(name or (parent.name() + 'Clone'))
with DagModifier() as mod:
mesh = mod.createNode('mesh', name, parent)
(mesh['inMesh'] << self['outMesh'])
return mesh
else:
raise TypeError(('Unsupported clone target: %s' % self))
| 6,024,038,159,471,500,000
|
Return a clone of self
A "clone" assignes the .outMesh attribute of a mesh node
to the `.inMesh` of the resulting clone.
Supports:
- mesh
Arguments:
name (str, optional): Name of newly created clone
parent (DagNode, optional): Parent to newly cloned node
worldspace (bool, optional): Translate output to worldspace
|
cmdx.py
|
clone
|
fvbehr/cmdx
|
python
|
def clone(self, name=None, parent=None, worldspace=False):
'Return a clone of self\n\n A "clone" assignes the .outMesh attribute of a mesh node\n to the `.inMesh` of the resulting clone.\n\n Supports:\n - mesh\n\n Arguments:\n name (str, optional): Name of newly created clone\n parent (DagNode, optional): Parent to newly cloned node\n worldspace (bool, optional): Translate output to worldspace\n\n '
if (self.isA(kShape) and (self.typeName == 'mesh')):
assert (parent is not None), 'mesh cloning requires parent argument'
(name or (parent.name() + 'Clone'))
with DagModifier() as mod:
mesh = mod.createNode('mesh', name, parent)
(mesh['inMesh'] << self['outMesh'])
return mesh
else:
raise TypeError(('Unsupported clone target: %s' % self))
|
def add(self, member):
'Add single `member` to set\n\n Arguments:\n member (cmdx.Node): Node to add\n\n '
return self.update([member])
| -1,154,492,052,028,597,800
|
Add single `member` to set
Arguments:
member (cmdx.Node): Node to add
|
cmdx.py
|
add
|
fvbehr/cmdx
|
python
|
def add(self, member):
'Add single `member` to set\n\n Arguments:\n member (cmdx.Node): Node to add\n\n '
return self.update([member])
|
def update(self, members):
'Add several `members` to set\n\n Arguments:\n members (list): Series of cmdx.Node instances\n\n '
cmds.sets(list(map(str, members)), forceElement=self.path())
| 6,528,813,593,263,597,000
|
Add several `members` to set
Arguments:
members (list): Series of cmdx.Node instances
|
cmdx.py
|
update
|
fvbehr/cmdx
|
python
|
def update(self, members):
'Add several `members` to set\n\n Arguments:\n members (list): Series of cmdx.Node instances\n\n '
cmds.sets(list(map(str, members)), forceElement=self.path())
|
def clear(self):
'Remove all members from set'
mobj = _encode1(self.name(namespace=True))
fn = om1.MFnSet(mobj)
fn.clear()
| -4,805,899,163,082,157,000
|
Remove all members from set
|
cmdx.py
|
clear
|
fvbehr/cmdx
|
python
|
def clear(self):
mobj = _encode1(self.name(namespace=True))
fn = om1.MFnSet(mobj)
fn.clear()
|
def sort(self, key=(lambda o: (o.typeName, o.path()))):
'Sort members of set by `key`\n\n Arguments:\n key (lambda): See built-in `sorted(key)` for reference\n\n '
members = sorted(self.members(), key=key)
self.clear()
self.update(members)
| 7,600,609,802,508,725,000
|
Sort members of set by `key`
Arguments:
key (lambda): See built-in `sorted(key)` for reference
|
cmdx.py
|
sort
|
fvbehr/cmdx
|
python
|
def sort(self, key=(lambda o: (o.typeName, o.path()))):
'Sort members of set by `key`\n\n Arguments:\n key (lambda): See built-in `sorted(key)` for reference\n\n '
members = sorted(self.members(), key=key)
self.clear()
self.update(members)
|
def descendent(self, type=None):
'Return the first descendent'
return next(self.descendents(type), None)
| 2,758,731,034,804,471,000
|
Return the first descendent
|
cmdx.py
|
descendent
|
fvbehr/cmdx
|
python
|
def descendent(self, type=None):
return next(self.descendents(type), None)
|
def descendents(self, type=None):
'Return hierarchy of objects in set'
for member in self.members(type=type):
(yield member)
try:
for child in member.descendents(type=type):
(yield child)
except AttributeError:
continue
| 2,284,004,089,486,410,800
|
Return hierarchy of objects in set
|
cmdx.py
|
descendents
|
fvbehr/cmdx
|
python
|
def descendents(self, type=None):
for member in self.members(type=type):
(yield member)
try:
for child in member.descendents(type=type):
(yield child)
except AttributeError:
continue
|
def flatten(self, type=None):
'Return members, converting nested object sets into its members\n\n Example:\n >>> from maya import cmds\n >>> _ = cmds.file(new=True, force=True)\n >>> a = cmds.createNode("transform", name="a")\n >>> b = cmds.createNode("transform", name="b")\n >>> c = cmds.createNode("transform", name="c")\n >>> cmds.select(a)\n >>> gc = cmds.sets([a], name="grandchild")\n >>> cc = cmds.sets([gc, b], name="child")\n >>> parent = cmds.sets([cc, c], name="parent")\n >>> mainset = encode(parent)\n >>> sorted(mainset.flatten(), key=lambda n: n.name())\n [|a, |b, |c]\n\n '
members = set()
def recurse(objset):
for member in objset:
if member.isA(om.MFn.kSet):
recurse(member)
elif (type is not None):
if (type == member.typeName):
members.add(member)
else:
members.add(member)
recurse(self)
return list(members)
| -7,318,383,916,224,831,000
|
Return members, converting nested object sets into its members
Example:
>>> from maya import cmds
>>> _ = cmds.file(new=True, force=True)
>>> a = cmds.createNode("transform", name="a")
>>> b = cmds.createNode("transform", name="b")
>>> c = cmds.createNode("transform", name="c")
>>> cmds.select(a)
>>> gc = cmds.sets([a], name="grandchild")
>>> cc = cmds.sets([gc, b], name="child")
>>> parent = cmds.sets([cc, c], name="parent")
>>> mainset = encode(parent)
>>> sorted(mainset.flatten(), key=lambda n: n.name())
[|a, |b, |c]
|
cmdx.py
|
flatten
|
fvbehr/cmdx
|
python
|
def flatten(self, type=None):
'Return members, converting nested object sets into its members\n\n Example:\n >>> from maya import cmds\n >>> _ = cmds.file(new=True, force=True)\n >>> a = cmds.createNode("transform", name="a")\n >>> b = cmds.createNode("transform", name="b")\n >>> c = cmds.createNode("transform", name="c")\n >>> cmds.select(a)\n >>> gc = cmds.sets([a], name="grandchild")\n >>> cc = cmds.sets([gc, b], name="child")\n >>> parent = cmds.sets([cc, c], name="parent")\n >>> mainset = encode(parent)\n >>> sorted(mainset.flatten(), key=lambda n: n.name())\n [|a, |b, |c]\n\n '
members = set()
def recurse(objset):
for member in objset:
if member.isA(om.MFn.kSet):
recurse(member)
elif (type is not None):
if (type == member.typeName):
members.add(member)
else:
members.add(member)
recurse(self)
return list(members)
|
def member(self, type=None):
'Return the first member'
return next(self.members(type), None)
| 2,758,058,022,612,825,600
|
Return the first member
|
cmdx.py
|
member
|
fvbehr/cmdx
|
python
|
def member(self, type=None):
return next(self.members(type), None)
|
def __abs__(self):
'Return absolute value of plug\n\n Example:\n >>> node = createNode("transform")\n >>> node["tx"] = -10\n >>> abs(node["tx"])\n 10.0\n\n '
return abs(self.read())
| 3,696,060,056,975,704,000
|
Return absolute value of plug
Example:
>>> node = createNode("transform")
>>> node["tx"] = -10
>>> abs(node["tx"])
10.0
|
cmdx.py
|
__abs__
|
fvbehr/cmdx
|
python
|
def __abs__(self):
'Return absolute value of plug\n\n Example:\n >>> node = createNode("transform")\n >>> node["tx"] = -10\n >>> abs(node["tx"])\n 10.0\n\n '
return abs(self.read())
|
def __bool__(self):
'if plug:\n\n Example:\n >>> node = createNode("transform")\n >>> node["tx"] = 10\n >>> if node["tx"]:\n ... True\n ...\n True\n\n '
return bool(self.read())
| 5,208,369,855,668,886,000
|
if plug:
Example:
>>> node = createNode("transform")
>>> node["tx"] = 10
>>> if node["tx"]:
... True
...
True
|
cmdx.py
|
__bool__
|
fvbehr/cmdx
|
python
|
def __bool__(self):
'if plug:\n\n Example:\n >>> node = createNode("transform")\n >>> node["tx"] = 10\n >>> if node["tx"]:\n ... True\n ...\n True\n\n '
return bool(self.read())
|
def __float__(self):
'Return plug as floating point value\n\n Example:\n >>> node = createNode("transform")\n >>> float(node["visibility"])\n 1.0\n\n '
return float(self.read())
| -1,371,728,202,996,577,800
|
Return plug as floating point value
Example:
>>> node = createNode("transform")
>>> float(node["visibility"])
1.0
|
cmdx.py
|
__float__
|
fvbehr/cmdx
|
python
|
def __float__(self):
'Return plug as floating point value\n\n Example:\n >>> node = createNode("transform")\n >>> float(node["visibility"])\n 1.0\n\n '
return float(self.read())
|
def __int__(self):
'Return plug as int\n\n Example:\n >>> node = createNode("transform")\n >>> int(node["visibility"])\n 1\n\n '
return int(self.read())
| -5,482,114,458,824,505,000
|
Return plug as int
Example:
>>> node = createNode("transform")
>>> int(node["visibility"])
1
|
cmdx.py
|
__int__
|
fvbehr/cmdx
|
python
|
def __int__(self):
'Return plug as int\n\n Example:\n >>> node = createNode("transform")\n >>> int(node["visibility"])\n 1\n\n '
return int(self.read())
|
def __eq__(self, other):
'Compare plug to `other`\n\n Example:\n >>> node = createNode("transform")\n >>> node["visibility"] == True\n True\n >>> node["visibility"] == node["nodeState"]\n False\n >>> node["visibility"] != node["nodeState"]\n True\n\n '
if isinstance(other, Plug):
other = other.read()
return (self.read() == other)
| -1,027,741,186,896,287,600
|
Compare plug to `other`
Example:
>>> node = createNode("transform")
>>> node["visibility"] == True
True
>>> node["visibility"] == node["nodeState"]
False
>>> node["visibility"] != node["nodeState"]
True
|
cmdx.py
|
__eq__
|
fvbehr/cmdx
|
python
|
def __eq__(self, other):
'Compare plug to `other`\n\n Example:\n >>> node = createNode("transform")\n >>> node["visibility"] == True\n True\n >>> node["visibility"] == node["nodeState"]\n False\n >>> node["visibility"] != node["nodeState"]\n True\n\n '
if isinstance(other, Plug):
other = other.read()
return (self.read() == other)
|
def __neg__(self):
'Negate unary operator\n\n Example:\n >>> node = createNode("transform")\n >>> node["visibility"] = 1\n >>> -node["visibility"]\n -1\n\n '
return (- self.read())
| 1,442,792,006,433,584,400
|
Negate unary operator
Example:
>>> node = createNode("transform")
>>> node["visibility"] = 1
>>> -node["visibility"]
-1
|
cmdx.py
|
__neg__
|
fvbehr/cmdx
|
python
|
def __neg__(self):
'Negate unary operator\n\n Example:\n >>> node = createNode("transform")\n >>> node["visibility"] = 1\n >>> -node["visibility"]\n -1\n\n '
return (- self.read())
|
def __div__(self, other):
'Python 2.x division\n\n Example:\n >>> node = createNode("transform")\n >>> node["tx"] = 5\n >>> node["ty"] = 2\n >>> node["tx"] / node["ty"]\n 2.5\n\n '
if isinstance(other, Plug):
other = other.read()
return (self.read() / other)
| -695,580,136,023,903,700
|
Python 2.x division
Example:
>>> node = createNode("transform")
>>> node["tx"] = 5
>>> node["ty"] = 2
>>> node["tx"] / node["ty"]
2.5
|
cmdx.py
|
__div__
|
fvbehr/cmdx
|
python
|
def __div__(self, other):
'Python 2.x division\n\n Example:\n >>> node = createNode("transform")\n >>> node["tx"] = 5\n >>> node["ty"] = 2\n >>> node["tx"] / node["ty"]\n 2.5\n\n '
if isinstance(other, Plug):
other = other.read()
return (self.read() / other)
|
def __truediv__(self, other):
'Float division, e.g. self / other'
if isinstance(other, Plug):
other = other.read()
return (self.read() / other)
| -4,273,911,146,258,172,400
|
Float division, e.g. self / other
|
cmdx.py
|
__truediv__
|
fvbehr/cmdx
|
python
|
def __truediv__(self, other):
if isinstance(other, Plug):
other = other.read()
return (self.read() / other)
|
def __add__(self, other):
'Support legacy add string to plug\n\n Note:\n Adding to short name is faster, e.g. node["t"] + "x",\n than adding to longName, e.g. node["translate"] + "X"\n\n Example:\n >>> node = createNode("transform")\n >>> node["tx"] = 5\n >>> node["translate"] + "X"\n 5.0\n >>> node["t"] + "x"\n 5.0\n >>> try:\n ... node["t"] + node["r"]\n ... except TypeError:\n ... error = True\n ...\n >>> error\n True\n\n '
if isinstance(other, str):
try:
return self._node[(self.name() + other)]
except ExistError:
return self._node[(self.name(long=True) + other)]
raise TypeError(("unsupported operand type(s) for +: 'Plug' and '%s'" % type(other)))
| -1,802,765,904,493,526,500
|
Support legacy add string to plug
Note:
Adding to short name is faster, e.g. node["t"] + "x",
than adding to longName, e.g. node["translate"] + "X"
Example:
>>> node = createNode("transform")
>>> node["tx"] = 5
>>> node["translate"] + "X"
5.0
>>> node["t"] + "x"
5.0
>>> try:
... node["t"] + node["r"]
... except TypeError:
... error = True
...
>>> error
True
|
cmdx.py
|
__add__
|
fvbehr/cmdx
|
python
|
def __add__(self, other):
'Support legacy add string to plug\n\n Note:\n Adding to short name is faster, e.g. node["t"] + "x",\n than adding to longName, e.g. node["translate"] + "X"\n\n Example:\n >>> node = createNode("transform")\n >>> node["tx"] = 5\n >>> node["translate"] + "X"\n 5.0\n >>> node["t"] + "x"\n 5.0\n >>> try:\n ... node["t"] + node["r"]\n ... except TypeError:\n ... error = True\n ...\n >>> error\n True\n\n '
if isinstance(other, str):
try:
return self._node[(self.name() + other)]
except ExistError:
return self._node[(self.name(long=True) + other)]
raise TypeError(("unsupported operand type(s) for +: 'Plug' and '%s'" % type(other)))
|
def __iadd__(self, other):
'Support += operator, for .append()\n\n Example:\n >>> node = createNode("transform")\n >>> node["myArray"] = Double(array=True)\n >>> node["myArray"].append(1.0)\n >>> node["myArray"].extend([2.0, 3.0])\n >>> node["myArray"] += 5.1\n >>> node["myArray"] += [1.1, 2.3, 999.0]\n >>> node["myArray"][0]\n 1.0\n >>> node["myArray"][6]\n 999.0\n >>> node["myArray"][-1]\n 999.0\n\n '
if isinstance(other, (tuple, list)):
for entry in other:
self.append(entry)
else:
self.append(other)
return self
| 8,823,742,964,830,426,000
|
Support += operator, for .append()
Example:
>>> node = createNode("transform")
>>> node["myArray"] = Double(array=True)
>>> node["myArray"].append(1.0)
>>> node["myArray"].extend([2.0, 3.0])
>>> node["myArray"] += 5.1
>>> node["myArray"] += [1.1, 2.3, 999.0]
>>> node["myArray"][0]
1.0
>>> node["myArray"][6]
999.0
>>> node["myArray"][-1]
999.0
|
cmdx.py
|
__iadd__
|
fvbehr/cmdx
|
python
|
def __iadd__(self, other):
'Support += operator, for .append()\n\n Example:\n >>> node = createNode("transform")\n >>> node["myArray"] = Double(array=True)\n >>> node["myArray"].append(1.0)\n >>> node["myArray"].extend([2.0, 3.0])\n >>> node["myArray"] += 5.1\n >>> node["myArray"] += [1.1, 2.3, 999.0]\n >>> node["myArray"][0]\n 1.0\n >>> node["myArray"][6]\n 999.0\n >>> node["myArray"][-1]\n 999.0\n\n '
if isinstance(other, (tuple, list)):
for entry in other:
self.append(entry)
else:
self.append(other)
return self
|
def __str__(self):
'Return value as str\n\n Example:\n >>> node = createNode("transform")\n >>> str(node["tx"])\n \'0.0\'\n\n '
return str(self.read())
| 6,129,550,338,742,648,000
|
Return value as str
Example:
>>> node = createNode("transform")
>>> str(node["tx"])
'0.0'
|
cmdx.py
|
__str__
|
fvbehr/cmdx
|
python
|
def __str__(self):
'Return value as str\n\n Example:\n >>> node = createNode("transform")\n >>> str(node["tx"])\n \'0.0\'\n\n '
return str(self.read())
|
def __rshift__(self, other):
'Support connecting attributes via A >> B'
self.connect(other)
| -2,720,337,759,746,114,600
|
Support connecting attributes via A >> B
|
cmdx.py
|
__rshift__
|
fvbehr/cmdx
|
python
|
def __rshift__(self, other):
self.connect(other)
|
def __lshift__(self, other):
'Support connecting attributes via A << B'
other.connect(self)
| 8,315,847,255,192,761,000
|
Support connecting attributes via A << B
|
cmdx.py
|
__lshift__
|
fvbehr/cmdx
|
python
|
def __lshift__(self, other):
other.connect(self)
|
def __floordiv__(self, other):
'Disconnect attribute via A // B\n\n Example:\n >>> nodeA = createNode("transform")\n >>> nodeB = createNode("transform")\n >>> nodeA["tx"] >> nodeB["tx"]\n >>> nodeA["tx"] = 5\n >>> nodeB["tx"] == 5\n True\n >>> nodeA["tx"] // nodeB["tx"]\n >>> nodeA["tx"] = 0\n >>> nodeB["tx"] == 5\n True\n\n '
self.disconnect(other)
| -4,267,484,824,930,592,000
|
Disconnect attribute via A // B
Example:
>>> nodeA = createNode("transform")
>>> nodeB = createNode("transform")
>>> nodeA["tx"] >> nodeB["tx"]
>>> nodeA["tx"] = 5
>>> nodeB["tx"] == 5
True
>>> nodeA["tx"] // nodeB["tx"]
>>> nodeA["tx"] = 0
>>> nodeB["tx"] == 5
True
|
cmdx.py
|
__floordiv__
|
fvbehr/cmdx
|
python
|
def __floordiv__(self, other):
'Disconnect attribute via A // B\n\n Example:\n >>> nodeA = createNode("transform")\n >>> nodeB = createNode("transform")\n >>> nodeA["tx"] >> nodeB["tx"]\n >>> nodeA["tx"] = 5\n >>> nodeB["tx"] == 5\n True\n >>> nodeA["tx"] // nodeB["tx"]\n >>> nodeA["tx"] = 0\n >>> nodeB["tx"] == 5\n True\n\n '
self.disconnect(other)
|
def __iter__(self):
'Iterate over value as a tuple\n\n Example:\n >>> node = createNode("transform")\n >>> node["translate"] = (0, 1, 2)\n >>> for index, axis in enumerate(node["translate"]):\n ... assert axis == float(index)\n ... assert isinstance(axis, Plug)\n ...\n >>> a = createNode("transform")\n >>> a["myArray"] = Message(array=True)\n >>> b = createNode("transform")\n >>> c = createNode("transform")\n >>> a["myArray"][0] << b["message"]\n >>> a["myArray"][1] << c["message"]\n >>> a["myArray"][0] in list(a["myArray"])\n True\n >>> a["myArray"][1] in list(a["myArray"])\n True\n >>> for single in node["visibility"]:\n ... print(single)\n ...\n True\n >>> node = createNode("wtAddMatrix")\n >>> node["wtMatrix"][0]["weightIn"] = 1.0\n\n '
if self._mplug.isArray:
for index in self._mplug.getExistingArrayAttributeIndices():
(yield self[index])
elif self._mplug.isCompound:
for index in range(self._mplug.numChildren()):
(yield self[index])
else:
values = self.read()
values = (values if isinstance(values, (tuple, list)) else [values])
for value in values:
(yield value)
| 1,250,772,775,994,694,700
|
Iterate over value as a tuple
Example:
>>> node = createNode("transform")
>>> node["translate"] = (0, 1, 2)
>>> for index, axis in enumerate(node["translate"]):
... assert axis == float(index)
... assert isinstance(axis, Plug)
...
>>> a = createNode("transform")
>>> a["myArray"] = Message(array=True)
>>> b = createNode("transform")
>>> c = createNode("transform")
>>> a["myArray"][0] << b["message"]
>>> a["myArray"][1] << c["message"]
>>> a["myArray"][0] in list(a["myArray"])
True
>>> a["myArray"][1] in list(a["myArray"])
True
>>> for single in node["visibility"]:
... print(single)
...
True
>>> node = createNode("wtAddMatrix")
>>> node["wtMatrix"][0]["weightIn"] = 1.0
|
cmdx.py
|
__iter__
|
fvbehr/cmdx
|
python
|
def __iter__(self):
'Iterate over value as a tuple\n\n Example:\n >>> node = createNode("transform")\n >>> node["translate"] = (0, 1, 2)\n >>> for index, axis in enumerate(node["translate"]):\n ... assert axis == float(index)\n ... assert isinstance(axis, Plug)\n ...\n >>> a = createNode("transform")\n >>> a["myArray"] = Message(array=True)\n >>> b = createNode("transform")\n >>> c = createNode("transform")\n >>> a["myArray"][0] << b["message"]\n >>> a["myArray"][1] << c["message"]\n >>> a["myArray"][0] in list(a["myArray"])\n True\n >>> a["myArray"][1] in list(a["myArray"])\n True\n >>> for single in node["visibility"]:\n ... print(single)\n ...\n True\n >>> node = createNode("wtAddMatrix")\n >>> node["wtMatrix"][0]["weightIn"] = 1.0\n\n '
if self._mplug.isArray:
for index in self._mplug.getExistingArrayAttributeIndices():
(yield self[index])
elif self._mplug.isCompound:
for index in range(self._mplug.numChildren()):
(yield self[index])
else:
values = self.read()
values = (values if isinstance(values, (tuple, list)) else [values])
for value in values:
(yield value)
|
def __getitem__(self, index):
'Read from child of array or compound plug\n\n Arguments:\n index (int): Logical index of plug (NOT physical, make note)\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> node = createNode("transform", name="mynode")\n >>> node["translate"][0].read()\n 0.0\n >>> node["visibility"][0]\n Traceback (most recent call last):\n ...\n TypeError: |mynode.visibility does not support indexing\n >>> node["translate"][2] = 5.1\n >>> node["translate"][2].read()\n 5.1\n\n '
cls = self.__class__
if isinstance(index, int):
if (index < 0):
index = (self.count() - abs(index))
if self._mplug.isArray:
item = self._mplug.elementByLogicalIndex(index)
return cls(self._node, item, self._unit)
elif self._mplug.isCompound:
item = self._mplug.child(index)
return cls(self._node, item, self._unit)
else:
raise TypeError(('%s does not support indexing' % self.path()))
elif isinstance(index, string_types):
if self._mplug.isCompound:
for child in range(self._mplug.numChildren()):
child = self._mplug.child(child)
(_, name) = child.name().rsplit('.', 1)
if (index == name):
return cls(self._node, child)
else:
raise TypeError(("'%s' is not a compound attribute" % self.path()))
raise ExistError(("'%s' was not found" % index))
| 6,862,531,856,520,929,000
|
Read from child of array or compound plug
Arguments:
index (int): Logical index of plug (NOT physical, make note)
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="mynode")
>>> node["translate"][0].read()
0.0
>>> node["visibility"][0]
Traceback (most recent call last):
...
TypeError: |mynode.visibility does not support indexing
>>> node["translate"][2] = 5.1
>>> node["translate"][2].read()
5.1
|
cmdx.py
|
__getitem__
|
fvbehr/cmdx
|
python
|
def __getitem__(self, index):
'Read from child of array or compound plug\n\n Arguments:\n index (int): Logical index of plug (NOT physical, make note)\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> node = createNode("transform", name="mynode")\n >>> node["translate"][0].read()\n 0.0\n >>> node["visibility"][0]\n Traceback (most recent call last):\n ...\n TypeError: |mynode.visibility does not support indexing\n >>> node["translate"][2] = 5.1\n >>> node["translate"][2].read()\n 5.1\n\n '
cls = self.__class__
if isinstance(index, int):
if (index < 0):
index = (self.count() - abs(index))
if self._mplug.isArray:
item = self._mplug.elementByLogicalIndex(index)
return cls(self._node, item, self._unit)
elif self._mplug.isCompound:
item = self._mplug.child(index)
return cls(self._node, item, self._unit)
else:
raise TypeError(('%s does not support indexing' % self.path()))
elif isinstance(index, string_types):
if self._mplug.isCompound:
for child in range(self._mplug.numChildren()):
child = self._mplug.child(child)
(_, name) = child.name().rsplit('.', 1)
if (index == name):
return cls(self._node, child)
else:
raise TypeError(("'%s' is not a compound attribute" % self.path()))
raise ExistError(("'%s' was not found" % index))
|
def __setitem__(self, index, value):
'Write to child of array or compound plug\n\n Example:\n >>> node = createNode("transform")\n >>> node["translate"][0] = 5\n >>> node["tx"]\n 5.0\n\n '
self[index].write(value)
| -2,246,581,550,528,541,700
|
Write to child of array or compound plug
Example:
>>> node = createNode("transform")
>>> node["translate"][0] = 5
>>> node["tx"]
5.0
|
cmdx.py
|
__setitem__
|
fvbehr/cmdx
|
python
|
def __setitem__(self, index, value):
'Write to child of array or compound plug\n\n Example:\n >>> node = createNode("transform")\n >>> node["translate"][0] = 5\n >>> node["tx"]\n 5.0\n\n '
self[index].write(value)
|
def __init__(self, node, mplug, unit=None, key=None, modifier=None):
'A Maya plug\n\n Arguments:\n node (Node): Parent Node of plug\n mplug (maya.api.OpenMaya.MPlug): Internal Maya plug\n unit (int, optional): Unit with which to read plug\n\n '
assert isinstance(node, Node), ('%s is not a Node' % node)
self._node = node
self._mplug = mplug
self._unit = unit
self._cached = None
self._key = key
self._modifier = modifier
| 7,909,641,668,024,552,000
|
A Maya plug
Arguments:
node (Node): Parent Node of plug
mplug (maya.api.OpenMaya.MPlug): Internal Maya plug
unit (int, optional): Unit with which to read plug
|
cmdx.py
|
__init__
|
fvbehr/cmdx
|
python
|
def __init__(self, node, mplug, unit=None, key=None, modifier=None):
'A Maya plug\n\n Arguments:\n node (Node): Parent Node of plug\n mplug (maya.api.OpenMaya.MPlug): Internal Maya plug\n unit (int, optional): Unit with which to read plug\n\n '
assert isinstance(node, Node), ('%s is not a Node' % node)
self._node = node
self._mplug = mplug
self._unit = unit
self._cached = None
self._key = key
self._modifier = modifier
|
def append(self, value):
'Add `value` to end of self, which is an array\n\n Arguments:\n value (object): If value, create a new entry and append it.\n If cmdx.Plug, create a new entry and connect it.\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> node = createNode("transform", name="appendTest")\n >>> node["myArray"] = Double(array=True)\n >>> node["myArray"].append(1.0)\n >>> node["notArray"] = Double()\n >>> node["notArray"].append(2.0)\n Traceback (most recent call last):\n ...\n TypeError: "|appendTest.notArray" was not an array attribute\n\n '
if (not self._mplug.isArray):
raise TypeError(('"%s" was not an array attribute' % self.path()))
index = self.count()
if isinstance(value, Plug):
(self[index] << value)
else:
self[index].write(value)
| -5,210,446,223,402,379,000
|
Add `value` to end of self, which is an array
Arguments:
value (object): If value, create a new entry and append it.
If cmdx.Plug, create a new entry and connect it.
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="appendTest")
>>> node["myArray"] = Double(array=True)
>>> node["myArray"].append(1.0)
>>> node["notArray"] = Double()
>>> node["notArray"].append(2.0)
Traceback (most recent call last):
...
TypeError: "|appendTest.notArray" was not an array attribute
|
cmdx.py
|
append
|
fvbehr/cmdx
|
python
|
def append(self, value):
'Add `value` to end of self, which is an array\n\n Arguments:\n value (object): If value, create a new entry and append it.\n If cmdx.Plug, create a new entry and connect it.\n\n Example:\n >>> _ = cmds.file(new=True, force=True)\n >>> node = createNode("transform", name="appendTest")\n >>> node["myArray"] = Double(array=True)\n >>> node["myArray"].append(1.0)\n >>> node["notArray"] = Double()\n >>> node["notArray"].append(2.0)\n Traceback (most recent call last):\n ...\n TypeError: "|appendTest.notArray" was not an array attribute\n\n '
if (not self._mplug.isArray):
raise TypeError(('"%s" was not an array attribute' % self.path()))
index = self.count()
if isinstance(value, Plug):
(self[index] << value)
else:
self[index].write(value)
|
def extend(self, values):
'Append multiple values to the end of an array\n\n Arguments:\n values (tuple): If values, create a new entry and append it.\n If cmdx.Plug\'s, create a new entry and connect it.\n\n Example:\n >>> node = createNode("transform")\n >>> node["myArray"] = Double(array=True)\n >>> node["myArray"].extend([1.0, 2.0, 3.0])\n >>> node["myArray"][0]\n 1.0\n >>> node["myArray"][-1]\n 3.0\n\n '
for value in values:
self.append(value)
| 1,190,274,450,193,964,800
|
Append multiple values to the end of an array
Arguments:
values (tuple): If values, create a new entry and append it.
If cmdx.Plug's, create a new entry and connect it.
Example:
>>> node = createNode("transform")
>>> node["myArray"] = Double(array=True)
>>> node["myArray"].extend([1.0, 2.0, 3.0])
>>> node["myArray"][0]
1.0
>>> node["myArray"][-1]
3.0
|
cmdx.py
|
extend
|
fvbehr/cmdx
|
python
|
def extend(self, values):
'Append multiple values to the end of an array\n\n Arguments:\n values (tuple): If values, create a new entry and append it.\n If cmdx.Plug\'s, create a new entry and connect it.\n\n Example:\n >>> node = createNode("transform")\n >>> node["myArray"] = Double(array=True)\n >>> node["myArray"].extend([1.0, 2.0, 3.0])\n >>> node["myArray"][0]\n 1.0\n >>> node["myArray"][-1]\n 3.0\n\n '
for value in values:
self.append(value)
|
def asDouble(self, time=None):
'Return plug as double (Python float)\n\n Example:\n >>> node = createNode("transform")\n >>> node["translateX"] = 5.0\n >>> node["translateX"].asDouble()\n 5.0\n\n '
if (time is not None):
return self._mplug.asDouble(DGContext(time=time))
return self._mplug.asDouble()
| 7,789,230,498,371,087,000
|
Return plug as double (Python float)
Example:
>>> node = createNode("transform")
>>> node["translateX"] = 5.0
>>> node["translateX"].asDouble()
5.0
|
cmdx.py
|
asDouble
|
fvbehr/cmdx
|
python
|
def asDouble(self, time=None):
'Return plug as double (Python float)\n\n Example:\n >>> node = createNode("transform")\n >>> node["translateX"] = 5.0\n >>> node["translateX"].asDouble()\n 5.0\n\n '
if (time is not None):
return self._mplug.asDouble(DGContext(time=time))
return self._mplug.asDouble()
|
def asMatrix(self, time=None):
'Return plug as MatrixType\n\n Example:\n >>> node1 = createNode("transform")\n >>> node2 = createNode("transform", parent=node1)\n >>> node1["translate"] = (0, 5, 0)\n >>> node2["translate"] = (0, 5, 0)\n >>> plug1 = node1["matrix"]\n >>> plug2 = node2["worldMatrix"][0]\n >>> mat1 = plug1.asMatrix()\n >>> mat2 = plug2.asMatrix()\n >>> mat = mat1 * mat2\n >>> tm = TransformationMatrix(mat)\n >>> list(tm.translation())\n [0.0, 15.0, 0.0]\n\n '
if (time is not None):
context = DGContext(time=time)
obj = self._mplug.asMObject(context)
else:
obj = self._mplug.asMObject()
return om.MFnMatrixData(obj).matrix()
| 6,023,091,757,034,787,000
|
Return plug as MatrixType
Example:
>>> node1 = createNode("transform")
>>> node2 = createNode("transform", parent=node1)
>>> node1["translate"] = (0, 5, 0)
>>> node2["translate"] = (0, 5, 0)
>>> plug1 = node1["matrix"]
>>> plug2 = node2["worldMatrix"][0]
>>> mat1 = plug1.asMatrix()
>>> mat2 = plug2.asMatrix()
>>> mat = mat1 * mat2
>>> tm = TransformationMatrix(mat)
>>> list(tm.translation())
[0.0, 15.0, 0.0]
|
cmdx.py
|
asMatrix
|
fvbehr/cmdx
|
python
|
def asMatrix(self, time=None):
'Return plug as MatrixType\n\n Example:\n >>> node1 = createNode("transform")\n >>> node2 = createNode("transform", parent=node1)\n >>> node1["translate"] = (0, 5, 0)\n >>> node2["translate"] = (0, 5, 0)\n >>> plug1 = node1["matrix"]\n >>> plug2 = node2["worldMatrix"][0]\n >>> mat1 = plug1.asMatrix()\n >>> mat2 = plug2.asMatrix()\n >>> mat = mat1 * mat2\n >>> tm = TransformationMatrix(mat)\n >>> list(tm.translation())\n [0.0, 15.0, 0.0]\n\n '
if (time is not None):
context = DGContext(time=time)
obj = self._mplug.asMObject(context)
else:
obj = self._mplug.asMObject()
return om.MFnMatrixData(obj).matrix()
|
def asTransformationMatrix(self, time=None):
'Return plug as TransformationMatrix\n\n Example:\n >>> node = createNode("transform")\n >>> node["translateY"] = 12\n >>> node["rotate"] = 1\n >>> tm = node["matrix"].asTm()\n >>> map(round, tm.rotation())\n [1.0, 1.0, 1.0]\n >>> list(tm.translation())\n [0.0, 12.0, 0.0]\n\n '
return TransformationMatrix(self.asMatrix(time))
| -6,577,919,965,052,642,000
|
Return plug as TransformationMatrix
Example:
>>> node = createNode("transform")
>>> node["translateY"] = 12
>>> node["rotate"] = 1
>>> tm = node["matrix"].asTm()
>>> map(round, tm.rotation())
[1.0, 1.0, 1.0]
>>> list(tm.translation())
[0.0, 12.0, 0.0]
|
cmdx.py
|
asTransformationMatrix
|
fvbehr/cmdx
|
python
|
def asTransformationMatrix(self, time=None):
'Return plug as TransformationMatrix\n\n Example:\n >>> node = createNode("transform")\n >>> node["translateY"] = 12\n >>> node["rotate"] = 1\n >>> tm = node["matrix"].asTm()\n >>> map(round, tm.rotation())\n [1.0, 1.0, 1.0]\n >>> list(tm.translation())\n [0.0, 12.0, 0.0]\n\n '
return TransformationMatrix(self.asMatrix(time))
|
@property
def connected(self):
'Return whether or not this attribute is connected (to anything)'
return (self.connection() is not None)
| -914,703,285,969,393,700
|
Return whether or not this attribute is connected (to anything)
|
cmdx.py
|
connected
|
fvbehr/cmdx
|
python
|
@property
def connected(self):
return (self.connection() is not None)
|
@locked.setter
def locked(self, value):
'Lock attribute'
elements = (self if (self.isArray or self.isCompound) else [self])
for el in elements:
cmds.setAttr(el.path(), lock=value)
| -776,994,980,534,713,500
|
Lock attribute
|
cmdx.py
|
locked
|
fvbehr/cmdx
|
python
|
@locked.setter
def locked(self, value):
elements = (self if (self.isArray or self.isCompound) else [self])
for el in elements:
cmds.setAttr(el.path(), lock=value)
|
@property
def channelBox(self):
'Is the attribute visible in the Channel Box?'
if (self.isArray or self.isCompound):
return all((plug._mplug.isChannelBox for plug in self))
else:
return self._mplug.isChannelBox
| -8,505,339,440,170,590,000
|
Is the attribute visible in the Channel Box?
|
cmdx.py
|
channelBox
|
fvbehr/cmdx
|
python
|
@property
def channelBox(self):
if (self.isArray or self.isCompound):
return all((plug._mplug.isChannelBox for plug in self))
else:
return self._mplug.isChannelBox
|
@property
def keyable(self):
'Is the attribute keyable?'
if (self.isArray or self.isCompound):
return all((plug._mplug.isKeyable for plug in self))
else:
return self._mplug.isKeyable
| -6,111,402,977,931,448,000
|
Is the attribute keyable?
|
cmdx.py
|
keyable
|
fvbehr/cmdx
|
python
|
@property
def keyable(self):
if (self.isArray or self.isCompound):
return all((plug._mplug.isKeyable for plug in self))
else:
return self._mplug.isKeyable
|
def hide(self):
'Hide attribute from channel box\n\n Note: An attribute cannot be hidden from the channel box\n and keyable at the same time. Therefore, this method\n also makes the attribute non-keyable.\n\n Supports array and compound attributes too.\n\n '
self.keyable = False
self.channelBox = False
| 7,086,864,995,649,053,000
|
Hide attribute from channel box
Note: An attribute cannot be hidden from the channel box
and keyable at the same time. Therefore, this method
also makes the attribute non-keyable.
Supports array and compound attributes too.
|
cmdx.py
|
hide
|
fvbehr/cmdx
|
python
|
def hide(self):
'Hide attribute from channel box\n\n Note: An attribute cannot be hidden from the channel box\n and keyable at the same time. Therefore, this method\n also makes the attribute non-keyable.\n\n Supports array and compound attributes too.\n\n '
self.keyable = False
self.channelBox = False
|
@property
def default(self):
'Return default value of plug'
return _plug_to_default(self._mplug)
| -2,075,065,239,552,319,000
|
Return default value of plug
|
cmdx.py
|
default
|
fvbehr/cmdx
|
python
|
@property
def default(self):
return _plug_to_default(self._mplug)
|
def reset(self):
'Restore plug to default value'
if self.writable:
self.write(self.default)
else:
raise TypeError(("Cannot reset non-writable attribute '%s'" % self.path()))
| 4,564,896,471,130,789,000
|
Restore plug to default value
|
cmdx.py
|
reset
|
fvbehr/cmdx
|
python
|
def reset(self):
if self.writable:
self.write(self.default)
else:
raise TypeError(("Cannot reset non-writable attribute '%s'" % self.path()))
|
@property
def writable(self):
'Can the user write to this attribute?\n\n Convenience for combined call to `plug.connected`\n and `plug.locked`.\n\n Example:\n >> if node["translateX"].writable:\n .. node["translateX"] = 5\n\n '
return (not any([self.connected, self.locked]))
| 8,115,363,899,282,015,000
|
Can the user write to this attribute?
Convenience for combined call to `plug.connected`
and `plug.locked`.
Example:
>> if node["translateX"].writable:
.. node["translateX"] = 5
|
cmdx.py
|
writable
|
fvbehr/cmdx
|
python
|
@property
def writable(self):
'Can the user write to this attribute?\n\n Convenience for combined call to `plug.connected`\n and `plug.locked`.\n\n Example:\n >> if node["translateX"].writable:\n .. node["translateX"] = 5\n\n '
return (not any([self.connected, self.locked]))
|
def show(self):
'Show attribute in channel box\n\n Note: An attribute can be both visible in the channel box\n and non-keyable, therefore, unlike :func:`hide()`, this\n method does not alter the keyable state of the attribute.\n\n '
self.channelBox = True
| -7,542,682,929,514,979,000
|
Show attribute in channel box
Note: An attribute can be both visible in the channel box
and non-keyable, therefore, unlike :func:`hide()`, this
method does not alter the keyable state of the attribute.
|
cmdx.py
|
show
|
fvbehr/cmdx
|
python
|
def show(self):
'Show attribute in channel box\n\n Note: An attribute can be both visible in the channel box\n and non-keyable, therefore, unlike :func:`hide()`, this\n method does not alter the keyable state of the attribute.\n\n '
self.channelBox = True
|
def type(self):
'Retrieve API type of plug as string\n\n Example:\n >>> node = createNode("transform")\n >>> node["translate"].type()\n \'kAttribute3Double\'\n >>> node["translateX"].type()\n \'kDoubleLinearAttribute\'\n\n '
return self._mplug.attribute().apiTypeStr
| -6,392,659,328,201,751,000
|
Retrieve API type of plug as string
Example:
>>> node = createNode("transform")
>>> node["translate"].type()
'kAttribute3Double'
>>> node["translateX"].type()
'kDoubleLinearAttribute'
|
cmdx.py
|
type
|
fvbehr/cmdx
|
python
|
def type(self):
'Retrieve API type of plug as string\n\n Example:\n >>> node = createNode("transform")\n >>> node["translate"].type()\n \'kAttribute3Double\'\n >>> node["translateX"].type()\n \'kDoubleLinearAttribute\'\n\n '
return self._mplug.attribute().apiTypeStr
|
def read(self, unit=None, time=None):
'Read attribute value\n\n Arguments:\n unit (int, optional): Unit with which to read plug\n time (float, optional): Time at which to read plug\n\n Example:\n >>> node = createNode("transform")\n >>> node["ty"] = 100.0\n >>> node["ty"].read()\n 100.0\n >>> node["ty"].read(unit=Meters)\n 1.0\n\n '
unit = (unit if (unit is not None) else self._unit)
context = (None if (time is None) else DGContext(time=time))
try:
value = _plug_to_python(self._mplug, unit=unit, context=context)
self._node._state['values'][(self._key, unit)] = value
return value
except RuntimeError:
raise
except TypeError:
log.error(("'%s': failed to read attribute" % self.path()))
raise
| 1,548,194,270,824,116,700
|
Read attribute value
Arguments:
unit (int, optional): Unit with which to read plug
time (float, optional): Time at which to read plug
Example:
>>> node = createNode("transform")
>>> node["ty"] = 100.0
>>> node["ty"].read()
100.0
>>> node["ty"].read(unit=Meters)
1.0
|
cmdx.py
|
read
|
fvbehr/cmdx
|
python
|
def read(self, unit=None, time=None):
'Read attribute value\n\n Arguments:\n unit (int, optional): Unit with which to read plug\n time (float, optional): Time at which to read plug\n\n Example:\n >>> node = createNode("transform")\n >>> node["ty"] = 100.0\n >>> node["ty"].read()\n 100.0\n >>> node["ty"].read(unit=Meters)\n 1.0\n\n '
unit = (unit if (unit is not None) else self._unit)
context = (None if (time is None) else DGContext(time=time))
try:
value = _plug_to_python(self._mplug, unit=unit, context=context)
self._node._state['values'][(self._key, unit)] = value
return value
except RuntimeError:
raise
except TypeError:
log.error(("'%s': failed to read attribute" % self.path()))
raise
|
def disconnect(self, other=None, source=True, destination=True):
    """Break the connection between this plug and `other`.

    Arguments:
        other (Plug, optional): Counterpart to disconnect from;
            when omitted, every connection is broken
        source (bool, optional): Disconnect incoming connections
        destination (bool, optional): Disconnect outgoing connections
    """
    other_mplug = getattr(other, '_mplug', None)

    # Queue on an in-progress modifier when one is pending;
    # otherwise perform the disconnect immediately.
    if getattr(self._modifier, 'isDone', True):
        modifier = DGModifier()
        modifier.disconnect(self._mplug, other_mplug, source, destination)
        modifier.doIt()
    else:
        self._modifier.disconnect(self._mplug, other_mplug, source, destination)
| 840,204,997,532,550,300
|
Disconnect self from `other`
Arguments:
other (Plug, optional): If none is provided, disconnect everything
Example:
>>> node1 = createNode("transform")
>>> node2 = createNode("transform")
>>> node2["tx"].connection() is None
True
>>> node2["ty"].connection() is None
True
>>>
>>> node2["tx"] << node1["tx"]
>>> node2["ty"] << node1["ty"]
>>> node2["ty"].connection() is None
False
>>> node2["tx"].connection() is None
False
>>>
>>> node2["tx"].disconnect(node1["tx"])
>>> node2["ty"].disconnect()
>>> node2["tx"].connection() is None
True
>>> node2["ty"].connection() is None
True
|
cmdx.py
|
disconnect
|
fvbehr/cmdx
|
python
|
def disconnect(self, other=None, source=True, destination=True):
    """Disconnect self from `other`

    Arguments:
        other (Plug, optional): If none is provided, disconnect everything
        source (bool, optional): Disconnect incoming connections
        destination (bool, optional): Disconnect outgoing connections
    """
    # `other` may be a Plug, or None to disconnect everything
    other = getattr(other, '_mplug', None)
    if (not getattr(self._modifier, 'isDone', True)):
        # A modifier is still pending; queue the disconnect on it
        mod = self._modifier
        mod.disconnect(self._mplug, other, source, destination)
    else:
        # No pending modifier; disconnect immediately
        mod = DGModifier()
        mod.disconnect(self._mplug, other, source, destination)
        mod.doIt()
|
def connections(self, type=None, source=True, destination=True, plugs=False, unit=None):
    """Yield plugs connected to self

    Arguments:
        type (int or str, optional): Only return nodes of this type
        source (bool, optional): Return source plugs, default is True
        destination (bool, optional): Return destination plugs, default is True
        plugs (bool, optional): Return connected plugs instead of nodes
        unit (int, optional): Return plug in this unit, e.g. Meters
    """
    # Filter by type name for strings, otherwise by type id
    if isinstance(type, string_types):
        attr = 'typeName'
    else:
        attr = 'typeId'

    # A sequence means "any of these types"
    if isinstance(type, (tuple, list)):
        match = operator.contains
    else:
        match = operator.eq

    for mplug in self._mplug.connectedTo(source, destination):
        node = Node(mplug.node())

        if type and not match(type, getattr(node._fn, attr)):
            continue

        yield Plug(node, mplug, unit) if plugs else node
| -4,690,551,879,807,121,000
|
Yield plugs connected to self
Arguments:
type (int, optional): Only return nodes of this type
source (bool, optional): Return source plugs,
default is True
destination (bool, optional): Return destination plugs,
default is True
plugs (bool, optional): Return connected plugs instead of nodes
unit (int, optional): Return plug in this unit, e.g. Meters
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", name="A")
>>> b = createNode("multDoubleLinear", name="B")
>>> a["ihi"] << b["ihi"]
>>> a["ihi"].connection() == b
True
>>> b["ihi"].connection() == a
True
>>> a["ihi"]
2
|
cmdx.py
|
connections
|
fvbehr/cmdx
|
python
|
def connections(self, type=None, source=True, destination=True, plugs=False, unit=None):
    """Yield plugs connected to self

    Arguments:
        type (int or str, optional): Only return nodes of this type
        source (bool, optional): Return source plugs, default is True
        destination (bool, optional): Return destination plugs, default is True
        plugs (bool, optional): Return connected plugs instead of nodes
        unit (int, optional): Return plug in this unit, e.g. Meters
    """
    # Compare against typeId by default, against typeName when
    # `type` is a string; membership test when it is a sequence
    op = operator.eq
    other = 'typeId'
    if isinstance(type, string_types):
        other = 'typeName'
    if isinstance(type, (tuple, list)):
        op = operator.contains
    for plug in self._mplug.connectedTo(source, destination):
        mobject = plug.node()
        node = Node(mobject)
        if ((not type) or op(type, getattr(node._fn, other))):
            (yield (Plug(node, plug, unit) if plugs else node))
|
def connection(self, type=None, source=True, destination=True, plug=False, unit=None):
    """Return the first result of :func:`connections()`, or None."""
    found = self.connections(
        type=type,
        source=source,
        destination=destination,
        plugs=plug,
        unit=unit,
    )
    return next(found, None)
| -2,606,376,444,302,805,500
|
Return first connection from :func:`connections()`
|
cmdx.py
|
connection
|
fvbehr/cmdx
|
python
|
def connection(self, type=None, source=True, destination=True, plug=False, unit=None):
    """Return first connection from :func:`connections()`, or None"""
    return next(self.connections(type=type, source=source, destination=destination, plugs=plug, unit=unit), None)
|
def rotateBy(self, rot, space=None):
    """Rotate the transformation, accepting convenient argument types.

    - `space` may be omitted (defaults to transform space)
    - tuples/lists are promoted to Vector, vectors to EulerRotation

    Arguments:
        rot (Vector, Quaternion): Rotation to add
    """
    if isinstance(rot, (tuple, list)):
        rot = Vector(rot)

    if isinstance(rot, om.MVector):
        rot = EulerRotation(rot)

    return super(TransformationMatrix, self).rotateBy(rot, space or sTransform)
| -7,356,503,586,325,908,000
|
Handle arguments conveniently
- Allow for optional `space` argument
- Automatically convert tuple to Vector
Arguments:
rot (Vector, Quaternion): Rotation to add
|
cmdx.py
|
rotateBy
|
fvbehr/cmdx
|
python
|
def rotateBy(self, rot, space=None):
    """Handle arguments conveniently

    - Allow for optional `space` argument
    - Automatically convert tuple to Vector

    Arguments:
        rot (Vector, Quaternion): Rotation to add
    """
    space = (space or sTransform)
    # Promote tuples/lists to a Vector, and vectors to an EulerRotation
    if isinstance(rot, (tuple, list)):
        rot = Vector(rot)
    if isinstance(rot, om.MVector):
        rot = EulerRotation(rot)
    return super(TransformationMatrix, self).rotateBy(rot, space)
|
def quaternion(self):
    """Return transformation matrix rotation as a Quaternion."""
    rotation = self.rotation(asQuaternion=True)
    return Quaternion(rotation)
| -1,817,595,353,253,382,100
|
Return transformation matrix as a Quaternion
|
cmdx.py
|
quaternion
|
fvbehr/cmdx
|
python
|
def quaternion(self):
    """Return transformation matrix as a Quaternion"""
    return Quaternion(self.rotation(asQuaternion=True))
|
def rotatePivot(self, space=None):
    """Return the rotate pivot; `space` defaults to transform space."""
    return super(TransformationMatrix, self).rotatePivot(space or sTransform)
| -4,865,172,239,268,790,000
|
This method does not typically support optional arguments
|
cmdx.py
|
rotatePivot
|
fvbehr/cmdx
|
python
|
def rotatePivot(self, space=None):
    """Return the rotate pivot; `space` defaults to transform space"""
    space = (space or sTransform)
    return super(TransformationMatrix, self).rotatePivot(space)
|
def translation(self, space=None):
    """Return the translation component; `space` defaults to transform space."""
    return super(TransformationMatrix, self).translation(space or sTransform)
| 5,144,207,445,142,521,000
|
This method does not typically support optional arguments
|
cmdx.py
|
translation
|
fvbehr/cmdx
|
python
|
def translation(self, space=None):
    """Return the translation component; `space` defaults to transform space"""
    space = (space or sTransform)
    return super(TransformationMatrix, self).translation(space)
|
def scaleBy(self, space=None):
    """Return the scale component as a Vector

    `space` defaults to transform space.

    NOTE(review): despite its name, this does not scale anything --
    it only reads the scale via MTransformationMatrix.scale().
    Confirm whether a `scale(space)` accessor was intended.
    """
    space = (space or sTransform)
    return Vector(super(TransformationMatrix, self).scale(space))
| -8,167,572,264,519,743,000
|
This method does not typically support optional arguments
|
cmdx.py
|
scaleBy
|
fvbehr/cmdx
|
python
|
def scaleBy(self, space=None):
    """Return the scale component as a Vector

    NOTE(review): despite its name, this only reads the scale via
    MTransformationMatrix.scale(); it does not modify anything.
    """
    space = (space or sTransform)
    return Vector(super(TransformationMatrix, self).scale(space))
|
def setScale(self, seq, space=None):
    """Set the scale component.

    Accepts a Plug, tuple/list or Vector for `seq`;
    `space` defaults to transform space.
    """
    if isinstance(seq, Plug):
        seq = seq.as_vector()

    if isinstance(seq, (tuple, list)):
        seq = Vector(*seq)

    return super(TransformationMatrix, self).setScale(seq, space or sTransform)
| 3,128,911,056,447,938,600
|
This method does not typically support optional arguments
|
cmdx.py
|
setScale
|
fvbehr/cmdx
|
python
|
def setScale(self, seq, space=None):
    """Set the scale component

    Accepts a Plug, tuple/list or Vector for `seq`;
    `space` defaults to transform space.
    """
    if isinstance(seq, Plug):
        seq = seq.as_vector()
    if isinstance(seq, (tuple, list)):
        seq = Vector(*seq)
    space = (space or sTransform)
    return super(TransformationMatrix, self).setScale(seq, space)
|
def setRotation(self, rot):
    """Set rotation, interpreting three values as an euler rotation."""
    # Plugs are first resolved into a plain vector of three values
    if isinstance(rot, Plug):
        rot = rot.as_vector()

    # Tuples and lists are promoted to a Vector, with a friendlier
    # error when the conversion is impossible
    if isinstance(rot, (tuple, list)):
        try:
            rot = Vector(rot)
        except ValueError:
            traceback.print_exc()
            raise ValueError("I tried automatically converting your tuple to a Vector, but couldn't..")

    # Vectors, in turn, become euler rotations
    if isinstance(rot, Vector):
        rot = EulerRotation(rot)

    return super(TransformationMatrix, self).setRotation(rot)
| -1,908,348,611,452,193,500
|
Interpret three values as an euler rotation
|
cmdx.py
|
setRotation
|
fvbehr/cmdx
|
python
|
def setRotation(self, rot):
    """Interpret three values as an euler rotation"""
    # Plug -> Vector -> EulerRotation, converting as needed
    if isinstance(rot, Plug):
        rot = rot.as_vector()
    if isinstance(rot, (tuple, list)):
        try:
            rot = Vector(rot)
        except ValueError:
            # Print the underlying cause, then raise a friendlier message
            traceback.print_exc()
            raise ValueError("I tried automatically converting your tuple to a Vector, but couldn't..")
    if isinstance(rot, Vector):
        rot = EulerRotation(rot)
    return super(TransformationMatrix, self).setRotation(rot)
|
def __call__(self, *item):
    """Index into the matrix, by row or by (row, element).

    Native API 2.0 MMatrix does not support indexing; API 1.0 does,
    but only for elements. This supports both.

    Example:
        >>> m = MatrixType()
        >>> m(0, 0)
        1.0
        >>> m(0)
        (1.0, 0.0, 0.0, 0.0)
    """
    count = len(item)

    if count == 1:
        return self.row(*item)

    if count == 2:
        return self.element(*item)

    raise ValueError('Must provide either 1 or 2 coordinates, for row and element respectively')
| 8,799,916,385,701,850,000
|
Native API 2.0 MMatrix does not support indexing
API 1.0 however *does*, except only for elements
and not rows. Screw both of those, indexing isn't hard.
Arguments:
item (int, tuple): 1 integer for row, 2 for element
Identity/default matrix:
[[1.0, 0.0, 0.0, 0.0]]
[[0.0, 1.0, 0.0, 0.0]]
[[0.0, 0.0, 1.0, 0.0]]
[[0.0, 0.0, 0.0, 1.0]]
Example:
>>> m = MatrixType()
>>> m(0, 0)
1.0
>>> m(0, 1)
0.0
>>> m(1, 1)
1.0
>>> m(2, 1)
0.0
>>> m(3, 3)
1.0
>>>
>>> m(0)
(1.0, 0.0, 0.0, 0.0)
|
cmdx.py
|
__call__
|
fvbehr/cmdx
|
python
|
def __call__(self, *item):
    """Index the matrix by row (1 argument) or element (2 arguments)

    Example:
        >>> m = MatrixType()
        >>> m(0, 0)
        1.0
        >>> m(0)
        (1.0, 0.0, 0.0, 0.0)
    """
    if (len(item) == 1):
        return self.row(*item)
    elif (len(item) == 2):
        return self.element(*item)
    else:
        raise ValueError('Must provide either 1 or 2 coordinates, for row and element respectively')
|
@record_history
def disconnect(self, a, b=None, source=True, destination=True):
    """Disconnect `a` from `b`

    Arguments:
        a (Plug): Starting point of a connection
        b (Plug, optional): End point of a connection, defaults to all
        source (bool, optional): Disconnect b, if it is a source
        destination (bool, optional): Disconnect b, if it is a destination

    Normally, Maya only performs a disconnect if the connection
    is incoming; this handles both directions.
    """
    # Accept Plug instances as well as raw MPlugs
    if isinstance(a, Plug):
        a = a._mplug
    if isinstance(b, Plug):
        b = b._mplug
    if (b is None):
        # No counterpart given; disconnect every connection of `a`
        if source:
            for plug in a.connectedTo(True, False):
                self._modifier.disconnect(plug, a)
        if destination:
            for plug in a.connectedTo(False, True):
                self._modifier.disconnect(a, plug)
    else:
        # Counterpart given; disconnect in either requested direction
        if source:
            self._modifier.disconnect(a, b)
        if destination:
            self._modifier.disconnect(b, a)
| 142,448,763,885,116,640
|
Disconnect `a` from `b`
Arguments:
a (Plug): Starting point of a connection
b (Plug, optional): End point of a connection, defaults to all
source (bool, optional): Disconnect b, if it is a source
destination (bool, optional): Disconnect b, if it is a destination
Normally, Maya only performs a disconnect if the
connection is incoming. Bidirectional
disconnect(A, B) => OK
__________ _________
| | | |
| nodeA o---->o nodeB |
|__________| |_________|
disconnect(B, A) => NO
__________ _________
| | | |
| nodeA o---->o nodeB |
|__________| |_________|
|
cmdx.py
|
disconnect
|
fvbehr/cmdx
|
python
|
@record_history
def disconnect(self, a, b=None, source=True, destination=True):
    """Disconnect `a` from `b`

    Arguments:
        a (Plug): Starting point of a connection
        b (Plug, optional): End point of a connection, defaults to all
        source (bool, optional): Disconnect b, if it is a source
        destination (bool, optional): Disconnect b, if it is a destination
    """
    # Accept Plug instances as well as raw MPlugs
    if isinstance(a, Plug):
        a = a._mplug
    if isinstance(b, Plug):
        b = b._mplug
    if (b is None):
        # No counterpart given; disconnect every connection of `a`
        if source:
            for plug in a.connectedTo(True, False):
                self._modifier.disconnect(plug, a)
        if destination:
            for plug in a.connectedTo(False, True):
                self._modifier.disconnect(a, plug)
    else:
        # Counterpart given; disconnect in either requested direction
        if source:
            self._modifier.disconnect(a, b)
        if destination:
            self._modifier.disconnect(b, a)
|
def __init__(self, time=None):
    """Context for evaluating the Maya DG.

    Extension of MDGContext to also accept time as a float. In Maya 2018
    and above DGContext can also be used as a context manager.

    Arguments:
        time (float, om.MTime, optional): Time at which to evaluate context
    """
    if time is None:
        super(DGContext, self).__init__()
    else:
        # Promote plain numbers to MTime, in the current UI unit
        if isinstance(time, (int, float)):
            time = om.MTime(time, om.MTime.uiUnit())
        super(DGContext, self).__init__(time)

    # Presumably holds the prior context while used as a
    # context manager (__enter__/__exit__ not shown here)
    self._previousContext = None
| 124,309,355,993,588,350
|
Context for evaluating the Maya DG
Extension of MDGContext to also accept time as a float. In Maya 2018
and above DGContext can also be used as a context manager.
Arguments:
time (float, om.MTime, optional): Time at which to evaluate context
|
cmdx.py
|
__init__
|
fvbehr/cmdx
|
python
|
def __init__(self, time=None):
    """Context for evaluating the Maya DG

    Extension of MDGContext to also accept time as a float. In Maya 2018
    and above DGContext can also be used as a context manager.

    Arguments:
        time (float, om.MTime, optional): Time at which to evaluate context
    """
    if (time is not None):
        # Promote plain numbers to MTime, in the current UI unit
        if isinstance(time, (int, float)):
            time = om.MTime(time, om.MTime.uiUnit())
        super(DGContext, self).__init__(time)
    else:
        super(DGContext, self).__init__()
    # Presumably holds the prior context while used as a
    # context manager (__enter__/__exit__ not shown here)
    self._previousContext = None
|
def __hash__(self):
    """Hash by name, to support storing in set()."""
    name = self['name']
    return hash(name)
| -4,552,424,402,059,984,000
|
Support storing in set()
|
cmdx.py
|
__hash__
|
fvbehr/cmdx
|
python
|
def __hash__(self):
    """Support storing in set(), hashing by name"""
    return hash(self['name'])
|
def __repr__(self):
    """Avoid repr depicting the full contents of this dict."""
    name = self['name']
    return name
| 2,973,130,170,494,998,000
|
Avoid repr depicting the full contents of this dict
|
cmdx.py
|
__repr__
|
fvbehr/cmdx
|
python
|
def __repr__(self):
    """Avoid repr depicting the full contents of this dict"""
    return self['name']
|
def __new__(cls, *args, **kwargs):
    """Support for using name of assignment

    Example:
        node["thisName"] = cmdx.Double()

    In this example, the attribute isn't given a `name`;
    instead, the name is inferred from where it is assigned.
    """
    if (not args):
        # No positional arguments; defer construction by returning
        # the class alongside its keyword arguments
        return (cls, kwargs)
    return super(_AbstractAttribute, cls).__new__(cls, *args, **kwargs)
| 1,672,795,968,862,260,700
|
Support for using name of assignment
Example:
node["thisName"] = cmdx.Double()
In this example, the attribute isn't given a `name`
Instead, the name is inferred from where it is assigned.
|
cmdx.py
|
__new__
|
fvbehr/cmdx
|
python
|
def __new__(cls, *args, **kwargs):
    """Support for using name of assignment

    When called without positional arguments, construction is
    deferred by returning the class alongside its keyword arguments.
    """
    if (not args):
        return (cls, kwargs)
    return super(_AbstractAttribute, cls).__new__(cls, *args, **kwargs)
|
def default(self, cls=None):
'Return one of three available values\n\n Resolution order:\n 1. Argument\n 2. Node default (from cls.defaults)\n 3. Attribute default\n\n '
if (self['default'] is not None):
return self['default']
if (cls is not None):
return cls.defaults.get(self['name'], self.Default)
return self.Default
| 98,349,412,122,129,840
|
Return one of three available values
Resolution order:
1. Argument
2. Node default (from cls.defaults)
3. Attribute default
|
cmdx.py
|
default
|
fvbehr/cmdx
|
python
|
def default(self, cls=None):
    """Return one of three available values

    Resolution order:
        1. Argument
        2. Node default (from cls.defaults)
        3. Attribute default
    """
    if (self['default'] is not None):
        return self['default']
    if (cls is not None):
        return cls.defaults.get(self['name'], self.Default)
    return self.Default
|
def read(self, handle):
    """Read each child attribute from the given MDataHandle."""
    return tuple(
        child.read(handle.child(child['mobject']))
        for child in self['children']
    )
| -2,664,895,905,616,412,700
|
Read from MDataHandle
|
cmdx.py
|
read
|
fvbehr/cmdx
|
python
|
def read(self, handle):
    """Read each child attribute from the given MDataHandle"""
    output = list()
    for child in self['children']:
        # Resolve the per-child handle before delegating the read
        child_handle = handle.child(child['mobject'])
        output.append(child.read(child_handle))
    return tuple(output)
|
def add(self, name, installer, args, api=2):
    """Convenience shorthand for .append(Callback(...))."""
    self.append(Callback(name, installer, args, api))
| -1,907,981,419,934,310,000
|
Convenience method for .append(Callback())
|
cmdx.py
|
add
|
fvbehr/cmdx
|
python
|
def add(self, name, installer, args, api=2):
    """Convenience method for .append(Callback())"""
    callback = Callback(name, installer, args, api)
    self.append(callback)
|
def descendents(self, type=None):
    """Faster and more efficient dependency graph traversal

    Requires Maya 2017+

    Arguments:
        type (int or str, optional): Only yield nodes of this type
    """
    type = (type or om.MFn.kInvalid)
    typeName = None
    # A string argument means filtering by type *name* rather than id
    if isinstance(type, string_types):
        typeName = type
        type = om.MFn.kInvalid
    it = om.MItDag(om.MItDag.kDepthFirst, om.MFn.kInvalid)
    it.reset(self._mobject, om.MItDag.kDepthFirst, om.MIteratorType.kMObject)
    # Advance past the root (self); only descendents are yielded
    it.next()
    while (not it.isDone()):
        mobj = it.currentItem()
        node = DagNode(mobj)
        if (typeName is None):
            if ((not type) or (type == node._fn.typeId)):
                (yield node)
        elif ((not typeName) or (typeName == node._fn.typeName)):
            (yield node)
        it.next()
| -5,788,129,182,698,006,000
|
Faster and more efficient dependency graph traversal
Requires Maya 2017+
Example:
>>> grandparent = createNode("transform")
>>> parent = createNode("transform", parent=grandparent)
>>> child = createNode("transform", parent=parent)
>>> mesh = createNode("mesh", parent=child)
>>> it = grandparent.descendents(type=tMesh)
>>> next(it) == mesh
True
>>> next(it)
Traceback (most recent call last):
...
StopIteration
|
cmdx.py
|
descendents
|
fvbehr/cmdx
|
python
|
def descendents(self, type=None):
    """Faster and more efficient dependency graph traversal

    Requires Maya 2017+
    """
    type = (type or om.MFn.kInvalid)
    typeName = None
    # A string argument means filtering by type *name* rather than id
    if isinstance(type, string_types):
        typeName = type
        type = om.MFn.kInvalid
    it = om.MItDag(om.MItDag.kDepthFirst, om.MFn.kInvalid)
    it.reset(self._mobject, om.MItDag.kDepthFirst, om.MIteratorType.kMObject)
    # Advance past the root (self); only descendents are yielded
    it.next()
    while (not it.isDone()):
        mobj = it.currentItem()
        node = DagNode(mobj)
        if (typeName is None):
            if ((not type) or (type == node._fn.typeId)):
                (yield node)
        elif ((not typeName) or (typeName == node._fn.typeName)):
            (yield node)
        it.next()
|
def descendents(self, type=None):
    """Recursive, depth-first search; compliant with MItDag of 2017+

    Yields every descendent of self, depth-first,
    optionally filtered by type id or type name.

    Arguments:
        type (int or str, optional): Only yield nodes of this type
    """
    def _descendents(node, children=None):
        # Depth-first accumulation of `node` and all of its children
        children = children or list()
        children.append(node)
        for child in node.children(filter=None):
            _descendents(child, children)
        return children

    typeName = None

    # Use `string_types` rather than `str`, consistent with the
    # MItDag-based variant; on Python 2 this also accepts `unicode`
    if isinstance(type, string_types):
        typeName = type
        type = om.MFn.kInvalid

    # Skip the first entry, which is `self`
    descendents = _descendents(self)[1:]

    for child in descendents:
        if typeName is None:
            if (not type) or (type == child._fn.typeId):
                yield child
        elif (not typeName) or (typeName == child._fn.typeName):
            yield child
| 171,838,027,484,087,870
|
Recursive, depth-first search; compliant with MItDag of 2017+
Example:
>>> grandparent = createNode("transform")
>>> parent = createNode("transform", parent=grandparent)
>>> child = createNode("transform", parent=parent)
>>> mesh = createNode("mesh", parent=child)
>>> it = grandparent.descendents(type=tMesh)
>>> next(it) == mesh
True
>>> next(it)
Traceback (most recent call last):
...
StopIteration
|
cmdx.py
|
descendents
|
fvbehr/cmdx
|
python
|
def descendents(self, type=None):
    """Recursive, depth-first search; compliant with MItDag of 2017+"""
    def _descendents(node, children=None):
        # Depth-first accumulation of `node` and all of its children
        children = (children or list())
        children.append(node)
        for child in node.children(filter=None):
            _descendents(child, children)
        return children
    typeName = None
    # NOTE(review): the MItDag-based sibling uses `string_types` here;
    # `str` excludes `unicode` type names on Python 2 -- confirm intent
    if isinstance(type, str):
        typeName = type
        type = om.MFn.kInvalid
    # Skip the first entry, which is `self`
    descendents = _descendents(self)[1:]
    for child in descendents:
        if (typeName is None):
            if ((not type) or (type == child._fn.typeId)):
                (yield child)
        elif ((not typeName) or (typeName == child._fn.typeName)):
            (yield child)
|
def xmlMarkup(games, team_ab, team_name, team_record):
    """Markup the RSS feed using the data obtained.

    :param games: list of games that the team played this season
    :type games: list of GameData
    :param team_ab: the team's abbreviated name
    :type team_ab: string
    :param team_name: the team's name
    :type team_name: string
    :param team_record: the team's record, shown in the feed title
    :type team_record: string
    """
    # Escape '&', '<' and '>' so arbitrary names/headlines cannot
    # produce malformed XML
    from xml.sax.saxutils import escape

    file_name = (team_ab + '_feed.xml')

    # Write beside this script: feeds/<team_ab>/<file_name>
    # (technique from http://stackoverflow.com/questions/7935972)
    script_dir = os.path.dirname(os.path.abspath(__file__))
    dest_dir = os.path.join(script_dir, 'feeds', team_ab)
    try:
        os.makedirs(dest_dir)
    except OSError:
        # Directory already exists. NOTE(review): this also hides
        # permission errors -- consider checking errno / exist_ok
        pass

    path = os.path.join(dest_dir, file_name)
    with open(path, 'w') as xml:
        xml.write('<?xml version="1.0" encoding="UTF-8" ?>\n')
        xml.write("<rss version='2.0'>\n")
        xml.write('<channel>\n')
        xml.write(('<title>%s - %s</title>\n' % (escape(team_name), escape(team_record))))
        xml.write(('<description>Latest %s scores</description>\n' % escape(team_name)))
        xml.write(('<link>http://espn.go.com/nhl/team/schedule/_/name/%s</link>\n' % team_ab))
        for game in games:
            xml.write('<item>\n')
            xml.write(('<title>%s</title>\n' % escape(game.headline)))
            xml.write(('<link>%s</link>\n' % escape(game.link)))
            xml.write('</item>\n')
        xml.write('</channel>\n</rss>')
        # The redundant xml.close() was removed; `with` closes the file
| -8,852,356,321,434,919,000
|
Markup the RSS feed using the data obtained.
:param games: list of games that the team played this season
:type games: list of GameData
:param team_ab: the team's abbreviated name
:type team_ab: string
:param team_name: the team's name
:type team_name: string
|
markup.py
|
xmlMarkup
|
ak212/python-hockey-rss
|
python
|
def xmlMarkup(games, team_ab, team_name, team_record):
    """Markup the RSS feed using the data obtained.

    :param games: list of games that the team played this season
    :type games: list of GameData
    :param team_ab: the team's abbreviated name
    :type team_ab: string
    :param team_name: the team's name
    :type team_name: string
    """
    file_name = (team_ab + '_feed.xml')
    'Used code from http://stackoverflow.com/questions/7935972/\n    writing-to-a-new-directory-in-python-without-changing-directory'
    # Write beside this script: feeds/<team_ab>/<file_name>
    script_dir = os.path.dirname(os.path.abspath(__file__))
    dest_dir = os.path.join(script_dir, 'feeds', team_ab)
    try:
        os.makedirs(dest_dir)
    except OSError:
        # Directory presumably already exists; note this also
        # hides permission errors
        pass
    path = os.path.join(dest_dir, file_name)
    with open(path, 'w') as xml:
        xml.write('<?xml version="1.0" encoding="UTF-8" ?>\n')
        xml.write("<rss version='2.0'>\n")
        xml.write('<channel>\n')
        # NOTE(review): values are written unescaped; '&', '<', '>'
        # in names or headlines would produce invalid XML
        xml.write(('<title>%s - %s</title>\n' % (team_name, team_record)))
        xml.write(('<description>Latest %s scores</description>\n' % team_name))
        xml.write(('<link>http://espn.go.com/nhl/team/schedule/_/name/%s</link>\n' % team_ab))
        for game in games:
            xml.write('<item>\n')
            xml.write(('<title>%s</title>\n' % game.headline))
            xml.write(('<link>%s</link>\n' % game.link))
            xml.write('</item>\n')
        xml.write('</channel>\n</rss>')
        # NOTE(review): redundant -- the `with` block already closes it
        xml.close()
|
def StatEntryFromPath(path, pathspec, ext_attrs=True):
    """Builds a stat entry object from a given path.

    Args:
      path: A path (string value) to stat.
      pathspec: A `PathSpec` corresponding to the `path`.
      ext_attrs: Whether to include extended file attributes in the result.

    Returns:
      `StatEntry` object.
    """
    try:
        stat = utils.Stat(path)
    except (IOError, OSError) as error:
        # Degrade gracefully: log and return an entry carrying only
        # the pathspec, rather than propagating the failure
        logging.error("Failed to obtain stat for '%s': %s", pathspec, error)
        return rdf_client_fs.StatEntry(pathspec=pathspec)
    return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)
| 7,072,671,682,587,733,000
|
Builds a stat entry object from a given path.
Args:
path: A path (string value) to stat.
pathspec: A `PathSpec` corresponding to the `path`.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
|
grr/client/grr_response_client/client_utils.py
|
StatEntryFromPath
|
billstackpole/grr
|
python
|
def StatEntryFromPath(path, pathspec, ext_attrs=True):
    """Builds a stat entry object from a given path.

    Args:
      path: A path (string value) to stat.
      pathspec: A `PathSpec` corresponding to the `path`.
      ext_attrs: Whether to include extended file attributes in the result.

    Returns:
      `StatEntry` object.
    """
    try:
        stat = utils.Stat(path)
    except (IOError, OSError) as error:
        # Degrade gracefully: log and return an entry carrying only
        # the pathspec, rather than propagating the failure
        logging.error("Failed to obtain stat for '%s': %s", pathspec, error)
        return rdf_client_fs.StatEntry(pathspec=pathspec)
    return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)
|
def StatEntryFromStat(stat, pathspec, ext_attrs=True):
    """Build a stat entry object from a given stat object.

    Args:
      stat: A `Stat` object.
      pathspec: A `PathSpec` from which `stat` was obtained.
      ext_attrs: Whether to include extended file attributes in the result.

    Returns:
      `StatEntry` object.
    """
    result = rdf_client_fs.StatEntry(pathspec=pathspec)

    for attr in _STAT_ATTRS:
        value = getattr(stat.GetRaw(), attr, None)
        if value is None:
            continue

        value = int(value)
        if value < 0:
            # Reinterpret negative values as unsigned 32-bit integers
            value &= 0xFFFFFFFF

        setattr(result, attr, value)

    result.st_flags_linux = stat.GetLinuxFlags()
    result.st_flags_osx = stat.GetOsxFlags()

    if ext_attrs:
        result.ext_attrs = list(GetExtAttrs(stat.GetPath()))

    return result
| 2,486,362,639,857,381,400
|
Build a stat entry object from a given stat object.
Args:
stat: A `Stat` object.
pathspec: A `PathSpec` from which `stat` was obtained.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
|
grr/client/grr_response_client/client_utils.py
|
StatEntryFromStat
|
billstackpole/grr
|
python
|
def StatEntryFromStat(stat, pathspec, ext_attrs=True):
    """Build a stat entry object from a given stat object.

    Args:
      stat: A `Stat` object.
      pathspec: A `PathSpec` from which `stat` was obtained.
      ext_attrs: Whether to include extended file attributes in the result.

    Returns:
      `StatEntry` object.
    """
    result = rdf_client_fs.StatEntry(pathspec=pathspec)
    for attr in _STAT_ATTRS:
        value = getattr(stat.GetRaw(), attr, None)
        if (value is None):
            continue
        value = int(value)
        if (value < 0):
            # Reinterpret negative values as unsigned 32-bit (0xFFFFFFFF)
            value &= 4294967295
        setattr(result, attr, value)
    result.st_flags_linux = stat.GetLinuxFlags()
    result.st_flags_osx = stat.GetOsxFlags()
    if ext_attrs:
        result.ext_attrs = list(GetExtAttrs(stat.GetPath()))
    return result
|
@functools.partial(api_util.api_hook, tag='jax2tf_convert')
def convert(fun: Callable, *, polymorphic_shapes: Optional[Sequence[Any]]=None, with_gradient=True, enable_xla=True) -> Callable:
'Transforms `fun` to be executed by TensorFlow.\n\n See\n [README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md)\n for more details about usage and common problems.\n\n Args:\n fun: Function to be transformed. Its arguments and return value should be\n JAX arrays, or nested standard Python containers (tuple/list/dict) thereof\n (pytrees).\n polymorphic_shapes: Specifies input shapes to be treated polymorphically\n during conversion.\n .. warning:: The shape-polymorphic conversion is an experimental feature.\n It is meant to be sound, but it is known to reject some JAX programs\n that are shape polymorphic. The details of this feature can change. It\n should be a Python object with the same pytree structure as, or a prefix\n of, the tuple of arguments to the function, but with a shape\n specification corresponding to each argument. The default value is\n `None`, which is a shortcut for a tuple of `None` one for each argument,\n denoting that all shapes are monomorphic.\n See [how optional parameters are matched to\n arguments](https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).\n A shape specification for an array argument should be an object\n `PolyShape(dim0, dim1, ..., dimn)`\n where each `dim` is a dimension specification: a positive integer denoting\n a monomorphic dimension of the given size, or a string denoting a\n dimension variable assumed to range over non-zero dimension sizes, or\n the special placeholder string "_" denoting a monomorphic dimension\n whose size is given by the actual argument. As a shortcut, an Ellipsis\n suffix in the list of dimension specifications stands for a list of "_"\n placeholders. 
For convenience, a shape specification can also be given\n as a string\n representation, e.g.: "batch, ...", "batch, height, width, _", possibly\n with surrounding parentheses: "(batch, ...)".\n\n The conversion fails if it cannot ensure that the it would produce the same\n sequence of TF ops for any non-zero values of the dimension variables.\n\n polymorphic_shapes are only supported for positional arguments; shape\n polymorphism is not supported for keyword arguments.\n\n See [the README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md#shape-polymorphic-conversion)\n for more details.\n\n in_shapes: DEPRECATED in favor of `polymorphic_shapes`.\n with_gradient: if set, will add a tf.custom_gradient to the converted\n function, by converting the ``jax.vjp(fun)``. Only first-order\n differentiation is supported for now. If the converted function is saved\n in a SavedModel, the custom gradients are currently lost and an error will\n be raised if a gradient computation is attempted. This is due to a current\n bug in TensorFlow.\n enable_xla: if unset, the converter will try harder to use pure TF ops to\n convert the function, and raise an error if it can not be converted\n without resorting to XLA ops (default: True).\n\n Returns:\n A version of `fun` that expects TfVals as arguments (or\n tuple/lists/dicts) thereof, and returns TfVals as outputs.\n '
api._check_callable(fun)
def converted_fun(*args: TfVal, **kwargs: TfVal) -> TfVal:
if (not core.trace_state_clean()):
raise ValueError(('convert must be used outside all JAX transformations.' + f'Trace state: {core.thread_local_state.trace_state}'))
def check_arg(a):
if (not _is_tfval(a)):
msg = f'Argument {a} of type {type(a)} of jax2tf.convert(f) should be NumPy array, scalar, tf.Variable, or tf.Tensor'
raise TypeError(msg)
tree_util.tree_map(check_arg, args)
tree_util.tree_map(check_arg, list(kwargs.values()))
args = tuple((tree_util.tree_map((lambda x, i=i: tf.identity(x, f'jax2tf_arg_{i}')), a) for (i, a) in enumerate(args)))
kwargs = {k: tf.identity(v, f'jax2tf_arg_{k}') for (k, v) in kwargs.items()}
(args_flat, in_tree) = tree_util.tree_flatten((args, kwargs))
if (polymorphic_shapes is None):
polymorphic_shapes_ = ((None,) * len(args))
else:
if ((not isinstance(polymorphic_shapes, Sequence)) or (len(args) != len(polymorphic_shapes))):
msg = f'polymorphic_shapes must be a sequence with the same length as the positional argument list ({len(args)}). Got polymorphic_shapes={polymorphic_shapes}.'
raise TypeError(msg)
polymorphic_shapes_ = tuple(polymorphic_shapes)
polymorphic_shapes_flat = tuple(api_util.flatten_axes('jax2tf.convert polymorphic_shapes', in_tree.children()[0], polymorphic_shapes_))
polymorphic_shapes_flat = (polymorphic_shapes_flat + tuple(((None,) * (len(args_flat) - len(polymorphic_shapes_flat)))))
(args_avals_flat, shapeenv) = _args_to_avals_and_env(args_flat, polymorphic_shapes_flat)
f = lu.wrap_init(fun)
(flat_fun, out_tree_thunk) = flatten_fun(f, in_tree)
def converted_grad_fn(*out_cts_flat: TfVal, _out_cts_avals: Sequence[core.AbstractValue], variables=None):
if variables:
raise ValueError(f'Unexpected variables used in forward pass. This should not happen for first-order differentiation. variables={variables}')
def fun_vjp_jax(args_jax, out_cts_jax):
(_, pullback_jax) = jax.vjp(fun, *args_jax)
return pullback_jax(out_cts_jax)
if (polymorphic_shapes is None):
vjp_polymorphic_shapes = None
else:
args_polymorphic_shapes = tree_util.tree_unflatten(in_tree.children()[0], polymorphic_shapes_flat)
out_cts_polymorphic_shapes = tree_util.tree_unflatten(out_tree_thunk(), tuple((str(out_aval.shape) for out_aval in _out_cts_avals)))
vjp_polymorphic_shapes = [args_polymorphic_shapes, out_cts_polymorphic_shapes]
out_cts = tree_util.tree_unflatten(out_tree_thunk(), out_cts_flat)
with tf.name_scope('jax2tf_vjp'):
in_cts = convert(fun_vjp_jax, with_gradient=False, polymorphic_shapes=vjp_polymorphic_shapes)(args, out_cts)
return in_cts
try:
global _shape_env
assert (not _shape_env), f'Unexpected shape environment {_shape_env}'
global _enable_xla
prev_enable_xla = _enable_xla
_enable_xla = enable_xla
_shape_env = shapeenv
if with_gradient:
@tf.custom_gradient
def converted_fun_flat_with_custom_gradient(*args_flat: TfVal) -> TfVal:
out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat)
(outs, out_avals) = util.unzip2(out_with_avals)
return (tuple(outs), functools.partial(converted_grad_fn, _out_cts_avals=tuple(out_avals)))
out_flat = converted_fun_flat_with_custom_gradient(*args_flat)
else:
out_flat_raw = _interpret_fun(flat_fun, args_flat, args_avals_flat)
message = 'The jax2tf-converted function does not support gradients. Use `with_gradient` parameter to enable gradients'
out_flat = [tf.raw_ops.PreventGradient(input=o, message=message) for (o, _) in out_flat_raw]
finally:
_shape_env = {}
_enable_xla = prev_enable_xla
out_flat = [tf.identity(x, 'jax2tf_out') for x in out_flat]
out = tree_util.tree_unflatten(out_tree_thunk(), out_flat)
return out
return converted_fun
| -6,558,663,470,984,845,000
|
Transforms `fun` to be executed by TensorFlow.
See
[README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md)
for more details about usage and common problems.
Args:
fun: Function to be transformed. Its arguments and return value should be
JAX arrays, or nested standard Python containers (tuple/list/dict) thereof
(pytrees).
polymorphic_shapes: Specifies input shapes to be treated polymorphically
during conversion.
.. warning:: The shape-polymorphic conversion is an experimental feature.
It is meant to be sound, but it is known to reject some JAX programs
that are shape polymorphic. The details of this feature can change. It
should be a Python object with the same pytree structure as, or a prefix
of, the tuple of arguments to the function, but with a shape
specification corresponding to each argument. The default value is
`None`, which is a shortcut for a tuple of `None` one for each argument,
denoting that all shapes are monomorphic.
See [how optional parameters are matched to
arguments](https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).
A shape specification for an array argument should be an object
`PolyShape(dim0, dim1, ..., dimn)`
where each `dim` is a dimension specification: a positive integer denoting
a monomorphic dimension of the given size, or a string denoting a
dimension variable assumed to range over non-zero dimension sizes, or
the special placeholder string "_" denoting a monomorphic dimension
whose size is given by the actual argument. As a shortcut, an Ellipsis
suffix in the list of dimension specifications stands for a list of "_"
placeholders. For convenience, a shape specification can also be given
as a string
representation, e.g.: "batch, ...", "batch, height, width, _", possibly
with surrounding parentheses: "(batch, ...)".
The conversion fails if it cannot ensure that the it would produce the same
sequence of TF ops for any non-zero values of the dimension variables.
polymorphic_shapes are only supported for positional arguments; shape
polymorphism is not supported for keyword arguments.
See [the README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md#shape-polymorphic-conversion)
for more details.
in_shapes: DEPRECATED in favor of `polymorphic_shapes`.
with_gradient: if set, will add a tf.custom_gradient to the converted
function, by converting the ``jax.vjp(fun)``. Only first-order
differentiation is supported for now. If the converted function is saved
in a SavedModel, the custom gradients are currently lost and an error will
be raised if a gradient computation is attempted. This is due to a current
bug in TensorFlow.
enable_xla: if unset, the converter will try harder to use pure TF ops to
convert the function, and raise an error if it can not be converted
without resorting to XLA ops (default: True).
Returns:
A version of `fun` that expects TfVals as arguments (or
tuple/lists/dicts) thereof, and returns TfVals as outputs.
|
jax/experimental/jax2tf/jax2tf.py
|
convert
|
ho-oto/jax
|
python
|
@functools.partial(api_util.api_hook, tag='jax2tf_convert')
def convert(fun: Callable, *, polymorphic_shapes: Optional[Sequence[Any]]=None, with_gradient=True, enable_xla=True) -> Callable:
'Transforms `fun` to be executed by TensorFlow.\n\n See\n [README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md)\n for more details about usage and common problems.\n\n Args:\n fun: Function to be transformed. Its arguments and return value should be\n JAX arrays, or nested standard Python containers (tuple/list/dict) thereof\n (pytrees).\n polymorphic_shapes: Specifies input shapes to be treated polymorphically\n during conversion.\n .. warning:: The shape-polymorphic conversion is an experimental feature.\n It is meant to be sound, but it is known to reject some JAX programs\n that are shape polymorphic. The details of this feature can change. It\n should be a Python object with the same pytree structure as, or a prefix\n of, the tuple of arguments to the function, but with a shape\n specification corresponding to each argument. The default value is\n `None`, which is a shortcut for a tuple of `None` one for each argument,\n denoting that all shapes are monomorphic.\n See [how optional parameters are matched to\n arguments](https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).\n A shape specification for an array argument should be an object\n `PolyShape(dim0, dim1, ..., dimn)`\n where each `dim` is a dimension specification: a positive integer denoting\n a monomorphic dimension of the given size, or a string denoting a\n dimension variable assumed to range over non-zero dimension sizes, or\n the special placeholder string "_" denoting a monomorphic dimension\n whose size is given by the actual argument. As a shortcut, an Ellipsis\n suffix in the list of dimension specifications stands for a list of "_"\n placeholders. 
For convenience, a shape specification can also be given\n as a string\n representation, e.g.: "batch, ...", "batch, height, width, _", possibly\n with surrounding parentheses: "(batch, ...)".\n\n The conversion fails if it cannot ensure that the it would produce the same\n sequence of TF ops for any non-zero values of the dimension variables.\n\n polymorphic_shapes are only supported for positional arguments; shape\n polymorphism is not supported for keyword arguments.\n\n See [the README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md#shape-polymorphic-conversion)\n for more details.\n\n in_shapes: DEPRECATED in favor of `polymorphic_shapes`.\n with_gradient: if set, will add a tf.custom_gradient to the converted\n function, by converting the ``jax.vjp(fun)``. Only first-order\n differentiation is supported for now. If the converted function is saved\n in a SavedModel, the custom gradients are currently lost and an error will\n be raised if a gradient computation is attempted. This is due to a current\n bug in TensorFlow.\n enable_xla: if unset, the converter will try harder to use pure TF ops to\n convert the function, and raise an error if it can not be converted\n without resorting to XLA ops (default: True).\n\n Returns:\n A version of `fun` that expects TfVals as arguments (or\n tuple/lists/dicts) thereof, and returns TfVals as outputs.\n '
api._check_callable(fun)
def converted_fun(*args: TfVal, **kwargs: TfVal) -> TfVal:
if (not core.trace_state_clean()):
raise ValueError(('convert must be used outside all JAX transformations.' + f'Trace state: {core.thread_local_state.trace_state}'))
def check_arg(a):
if (not _is_tfval(a)):
msg = f'Argument {a} of type {type(a)} of jax2tf.convert(f) should be NumPy array, scalar, tf.Variable, or tf.Tensor'
raise TypeError(msg)
tree_util.tree_map(check_arg, args)
tree_util.tree_map(check_arg, list(kwargs.values()))
args = tuple((tree_util.tree_map((lambda x, i=i: tf.identity(x, f'jax2tf_arg_{i}')), a) for (i, a) in enumerate(args)))
kwargs = {k: tf.identity(v, f'jax2tf_arg_{k}') for (k, v) in kwargs.items()}
(args_flat, in_tree) = tree_util.tree_flatten((args, kwargs))
if (polymorphic_shapes is None):
polymorphic_shapes_ = ((None,) * len(args))
else:
if ((not isinstance(polymorphic_shapes, Sequence)) or (len(args) != len(polymorphic_shapes))):
msg = f'polymorphic_shapes must be a sequence with the same length as the positional argument list ({len(args)}). Got polymorphic_shapes={polymorphic_shapes}.'
raise TypeError(msg)
polymorphic_shapes_ = tuple(polymorphic_shapes)
polymorphic_shapes_flat = tuple(api_util.flatten_axes('jax2tf.convert polymorphic_shapes', in_tree.children()[0], polymorphic_shapes_))
polymorphic_shapes_flat = (polymorphic_shapes_flat + tuple(((None,) * (len(args_flat) - len(polymorphic_shapes_flat)))))
(args_avals_flat, shapeenv) = _args_to_avals_and_env(args_flat, polymorphic_shapes_flat)
f = lu.wrap_init(fun)
(flat_fun, out_tree_thunk) = flatten_fun(f, in_tree)
def converted_grad_fn(*out_cts_flat: TfVal, _out_cts_avals: Sequence[core.AbstractValue], variables=None):
if variables:
raise ValueError(f'Unexpected variables used in forward pass. This should not happen for first-order differentiation. variables={variables}')
def fun_vjp_jax(args_jax, out_cts_jax):
(_, pullback_jax) = jax.vjp(fun, *args_jax)
return pullback_jax(out_cts_jax)
if (polymorphic_shapes is None):
vjp_polymorphic_shapes = None
else:
args_polymorphic_shapes = tree_util.tree_unflatten(in_tree.children()[0], polymorphic_shapes_flat)
out_cts_polymorphic_shapes = tree_util.tree_unflatten(out_tree_thunk(), tuple((str(out_aval.shape) for out_aval in _out_cts_avals)))
vjp_polymorphic_shapes = [args_polymorphic_shapes, out_cts_polymorphic_shapes]
out_cts = tree_util.tree_unflatten(out_tree_thunk(), out_cts_flat)
with tf.name_scope('jax2tf_vjp'):
in_cts = convert(fun_vjp_jax, with_gradient=False, polymorphic_shapes=vjp_polymorphic_shapes)(args, out_cts)
return in_cts
try:
global _shape_env
assert (not _shape_env), f'Unexpected shape environment {_shape_env}'
global _enable_xla
prev_enable_xla = _enable_xla
_enable_xla = enable_xla
_shape_env = shapeenv
if with_gradient:
@tf.custom_gradient
def converted_fun_flat_with_custom_gradient(*args_flat: TfVal) -> TfVal:
out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat)
(outs, out_avals) = util.unzip2(out_with_avals)
return (tuple(outs), functools.partial(converted_grad_fn, _out_cts_avals=tuple(out_avals)))
out_flat = converted_fun_flat_with_custom_gradient(*args_flat)
else:
out_flat_raw = _interpret_fun(flat_fun, args_flat, args_avals_flat)
message = 'The jax2tf-converted function does not support gradients. Use `with_gradient` parameter to enable gradients'
out_flat = [tf.raw_ops.PreventGradient(input=o, message=message) for (o, _) in out_flat_raw]
finally:
_shape_env = {}
_enable_xla = prev_enable_xla
out_flat = [tf.identity(x, 'jax2tf_out') for x in out_flat]
out = tree_util.tree_unflatten(out_tree_thunk(), out_flat)
return out
return converted_fun
|
def _convert_jax_impl(jax_impl: Callable, *, multiple_results=True) -> Callable:
'Convert the JAX implementation of a primitive.\n\n Args:\n jax_impl: typically the impl-rule for a primitive, with signature\n `(*args: JaxVal, **kwargs) -> Sequence[JaxVal]`. This function implements\n a primitive in terms of other primitives.\n multiple_results: whether `jax_impl` returns a sequence of results.\n\n Returns:\n a function with signature `(*args: TfVal, _in_avals, _out_aval, **kwargs)\n -> Sequence[TfVal]`.\n '
def wrapped(*tf_args: TfVal, _in_avals: Sequence[core.AbstractValue], _out_aval: core.AbstractValue, **kwargs) -> Sequence[TfVal]:
def jax_impl_jax_args(*jax_args):
jax_results = jax_impl(*jax_args, **kwargs)
return (jax_results if multiple_results else [jax_results])
tf_results_with_avals = _interpret_fun(lu.wrap_init(jax_impl_jax_args), tf_args, _in_avals)
(tf_results, _) = util.unzip2(tf_results_with_avals)
return (tf_results if multiple_results else tf_results[0])
return wrapped
| 397,753,964,971,300,600
|
Convert the JAX implementation of a primitive.
Args:
jax_impl: typically the impl-rule for a primitive, with signature
`(*args: JaxVal, **kwargs) -> Sequence[JaxVal]`. This function implements
a primitive in terms of other primitives.
multiple_results: whether `jax_impl` returns a sequence of results.
Returns:
a function with signature `(*args: TfVal, _in_avals, _out_aval, **kwargs)
-> Sequence[TfVal]`.
|
jax/experimental/jax2tf/jax2tf.py
|
_convert_jax_impl
|
ho-oto/jax
|
python
|
def _convert_jax_impl(jax_impl: Callable, *, multiple_results=True) -> Callable:
'Convert the JAX implementation of a primitive.\n\n Args:\n jax_impl: typically the impl-rule for a primitive, with signature\n `(*args: JaxVal, **kwargs) -> Sequence[JaxVal]`. This function implements\n a primitive in terms of other primitives.\n multiple_results: whether `jax_impl` returns a sequence of results.\n\n Returns:\n a function with signature `(*args: TfVal, _in_avals, _out_aval, **kwargs)\n -> Sequence[TfVal]`.\n '
def wrapped(*tf_args: TfVal, _in_avals: Sequence[core.AbstractValue], _out_aval: core.AbstractValue, **kwargs) -> Sequence[TfVal]:
def jax_impl_jax_args(*jax_args):
jax_results = jax_impl(*jax_args, **kwargs)
return (jax_results if multiple_results else [jax_results])
tf_results_with_avals = _interpret_fun(lu.wrap_init(jax_impl_jax_args), tf_args, _in_avals)
(tf_results, _) = util.unzip2(tf_results_with_avals)
return (tf_results if multiple_results else tf_results[0])
return wrapped
|
def _interpret_jaxpr(jaxpr: core.ClosedJaxpr, *args: TfVal) -> Sequence[TfVal]:
'Evaluates a Jaxpr with tf.Tensor arguments.\n\n The output is a sequence of TfVal (no `core.unit`), suitable for use with TF.\n '
fun: lu.WrappedFun = lu.wrap_init(core.jaxpr_as_fun(jaxpr))
out_with_avals = _interpret_fun(fun, args, jaxpr.in_avals)
return tuple((v for (v, _) in out_with_avals))
| 5,744,566,795,549,360,000
|
Evaluates a Jaxpr with tf.Tensor arguments.
The output is a sequence of TfVal (no `core.unit`), suitable for use with TF.
|
jax/experimental/jax2tf/jax2tf.py
|
_interpret_jaxpr
|
ho-oto/jax
|
python
|
def _interpret_jaxpr(jaxpr: core.ClosedJaxpr, *args: TfVal) -> Sequence[TfVal]:
'Evaluates a Jaxpr with tf.Tensor arguments.\n\n The output is a sequence of TfVal (no `core.unit`), suitable for use with TF.\n '
fun: lu.WrappedFun = lu.wrap_init(core.jaxpr_as_fun(jaxpr))
out_with_avals = _interpret_fun(fun, args, jaxpr.in_avals)
return tuple((v for (v, _) in out_with_avals))
|
def _aval_to_tf_shape(aval: core.AbstractValue) -> Tuple[(Optional[int], ...)]:
'Generate a TF shape, possibly containing None for polymorphic dimensions.'
return tuple(map((lambda d: (None if isinstance(d, shape_poly.DimVar) else d)), aval.shape))
| 8,408,812,577,119,716,000
|
Generate a TF shape, possibly containing None for polymorphic dimensions.
|
jax/experimental/jax2tf/jax2tf.py
|
_aval_to_tf_shape
|
ho-oto/jax
|
python
|
def _aval_to_tf_shape(aval: core.AbstractValue) -> Tuple[(Optional[int], ...)]:
return tuple(map((lambda d: (None if isinstance(d, shape_poly.DimVar) else d)), aval.shape))
|
def _tfval_shape_dtype(val: TfVal) -> Tuple[(Sequence[Optional[int]], DType)]:
'Called for constants that occur in the program, or for input values to the converted function.\n\n The returned shape may have unknown components, but only when called for\n inputs.\n '
if isinstance(val, (tf.Tensor, tf.Variable)):
return (tuple(val.shape), to_jax_dtype(val.dtype))
else:
assert ((not config.jax_enable_checks) or _is_tfval(val)), f'Non TfVal: {val}'
raw_aval = xla.abstractify(val)
return (raw_aval.shape, raw_aval.dtype)
| 4,432,653,672,224,603,600
|
Called for constants that occur in the program, or for input values to the converted function.
The returned shape may have unknown components, but only when called for
inputs.
|
jax/experimental/jax2tf/jax2tf.py
|
_tfval_shape_dtype
|
ho-oto/jax
|
python
|
def _tfval_shape_dtype(val: TfVal) -> Tuple[(Sequence[Optional[int]], DType)]:
'Called for constants that occur in the program, or for input values to the converted function.\n\n The returned shape may have unknown components, but only when called for\n inputs.\n '
if isinstance(val, (tf.Tensor, tf.Variable)):
return (tuple(val.shape), to_jax_dtype(val.dtype))
else:
assert ((not config.jax_enable_checks) or _is_tfval(val)), f'Non TfVal: {val}'
raw_aval = xla.abstractify(val)
return (raw_aval.shape, raw_aval.dtype)
|
def _args_to_avals_and_env(args: Sequence[TfVal], polymorphic_shapes: Sequence[Optional[Union[(str, PolyShape)]]]) -> Tuple[(Sequence[core.AbstractValue], _ShapeEnv)]:
'Computes abstract values and a dimension environment for arguments.\n\n Args:\n args: the arguments, TF inputs.\n polymorphic_shapes: the polymorphic specifications for the arguments.\n Returns: a tuple of a sequence of abtract values corresponding to the\n arguments and a dimension environment.\n '
shapeenv: _ShapeEnv = {}
def input_aval(arg: TfVal, polymorphic_shape: Optional[str]) -> core.AbstractValue:
'The abstract value for an input.'
(raw_shape, dtype) = _tfval_shape_dtype(arg)
aval_shape = shape_poly.parse_spec(polymorphic_shape, raw_shape)
for (i, d) in enumerate(aval_shape):
if (type(d) is int):
assert (d == np.shape(arg)[i])
elif ((type(d) is shape_poly.DimVar) and (d not in shapeenv)):
shapeenv[d] = tf.shape(arg)[i]
else:
pass
return core.ShapedArray(aval_shape, dtype)
avals = tuple(map(input_aval, args, polymorphic_shapes))
return (avals, shapeenv)
| -6,757,952,359,203,451,000
|
Computes abstract values and a dimension environment for arguments.
Args:
args: the arguments, TF inputs.
polymorphic_shapes: the polymorphic specifications for the arguments.
Returns: a tuple of a sequence of abtract values corresponding to the
arguments and a dimension environment.
|
jax/experimental/jax2tf/jax2tf.py
|
_args_to_avals_and_env
|
ho-oto/jax
|
python
|
def _args_to_avals_and_env(args: Sequence[TfVal], polymorphic_shapes: Sequence[Optional[Union[(str, PolyShape)]]]) -> Tuple[(Sequence[core.AbstractValue], _ShapeEnv)]:
'Computes abstract values and a dimension environment for arguments.\n\n Args:\n args: the arguments, TF inputs.\n polymorphic_shapes: the polymorphic specifications for the arguments.\n Returns: a tuple of a sequence of abtract values corresponding to the\n arguments and a dimension environment.\n '
shapeenv: _ShapeEnv = {}
def input_aval(arg: TfVal, polymorphic_shape: Optional[str]) -> core.AbstractValue:
'The abstract value for an input.'
(raw_shape, dtype) = _tfval_shape_dtype(arg)
aval_shape = shape_poly.parse_spec(polymorphic_shape, raw_shape)
for (i, d) in enumerate(aval_shape):
if (type(d) is int):
assert (d == np.shape(arg)[i])
elif ((type(d) is shape_poly.DimVar) and (d not in shapeenv)):
shapeenv[d] = tf.shape(arg)[i]
else:
pass
return core.ShapedArray(aval_shape, dtype)
avals = tuple(map(input_aval, args, polymorphic_shapes))
return (avals, shapeenv)
|
def shape_as_value(x):
'Injects the shape of `x` as an array value.\n\n **Experimental: please give feedback, and expect changes!**\n\n This allows the use of a shape expression as array argument to JAX functions.\n A typical example is for implementing a mean operation:\n\n jnp.sum(x) / np.prod(jax2tf.shape_as_value(x))\n '
return NotImplementedError('shape_as_value is deprecated')
| 7,109,168,487,154,696,000
|
Injects the shape of `x` as an array value.
**Experimental: please give feedback, and expect changes!**
This allows the use of a shape expression as array argument to JAX functions.
A typical example is for implementing a mean operation:
jnp.sum(x) / np.prod(jax2tf.shape_as_value(x))
|
jax/experimental/jax2tf/jax2tf.py
|
shape_as_value
|
ho-oto/jax
|
python
|
def shape_as_value(x):
'Injects the shape of `x` as an array value.\n\n **Experimental: please give feedback, and expect changes!**\n\n This allows the use of a shape expression as array argument to JAX functions.\n A typical example is for implementing a mean operation:\n\n jnp.sum(x) / np.prod(jax2tf.shape_as_value(x))\n '
return NotImplementedError('shape_as_value is deprecated')
|
def _not(x):
'Computes bitwise not with support for booleans.\n\n Numpy and JAX support bitwise not for booleans by applying a logical not!\n This means that applying bitwise_not yields an unexected result:\n jnp.bitwise_not(jnp.array([True, False]))\n >> DeviceArray([False, True], dtype=bool)\n\n if you assume that booleans are simply casted to integers.\n jnp.bitwise_not(jnp.array([True, False]).astype(np.int32)).astype(bool)\n >> DeviceArray([True, True], dtype=bool)\n '
if (x.dtype == tf.bool):
return tf.logical_not(x)
else:
return tf.bitwise.invert(x)
| -711,532,768,277,860,400
|
Computes bitwise not with support for booleans.
Numpy and JAX support bitwise not for booleans by applying a logical not!
This means that applying bitwise_not yields an unexected result:
jnp.bitwise_not(jnp.array([True, False]))
>> DeviceArray([False, True], dtype=bool)
if you assume that booleans are simply casted to integers.
jnp.bitwise_not(jnp.array([True, False]).astype(np.int32)).astype(bool)
>> DeviceArray([True, True], dtype=bool)
|
jax/experimental/jax2tf/jax2tf.py
|
_not
|
ho-oto/jax
|
python
|
def _not(x):
'Computes bitwise not with support for booleans.\n\n Numpy and JAX support bitwise not for booleans by applying a logical not!\n This means that applying bitwise_not yields an unexected result:\n jnp.bitwise_not(jnp.array([True, False]))\n >> DeviceArray([False, True], dtype=bool)\n\n if you assume that booleans are simply casted to integers.\n jnp.bitwise_not(jnp.array([True, False]).astype(np.int32)).astype(bool)\n >> DeviceArray([True, True], dtype=bool)\n '
if (x.dtype == tf.bool):
return tf.logical_not(x)
else:
return tf.bitwise.invert(x)
|
def bool_to_int8(f, argnums):
'Computes bool valued functions using int8.'
argnums = tf.nest.flatten(argnums)
def wrapper(*args, **kwargs):
if (not any(((args[i].dtype == tf.bool) for i in argnums))):
return f(*args, **kwargs)
else:
args_cast = [(tf.cast(a, tf.int8) if (i in argnums) else a) for (i, a) in enumerate(args)]
if ('_in_avals' in kwargs):
def cast_aval(aval):
return core.ShapedArray(aval.shape, np.int8)
_in_avals_cast = [(cast_aval(aval) if (i in argnums) else aval) for (i, aval) in enumerate(kwargs['_in_avals'])]
_out_aval_cast = tf.nest.map_structure(cast_aval, kwargs['_out_aval'])
kwargs = dict(kwargs, _in_avals=_in_avals_cast, _out_aval=_out_aval_cast)
out = f(*args_cast, **kwargs)
return tf.nest.map_structure((lambda o: tf.cast(o, tf.bool)), out)
return wrapper
| -6,657,190,721,257,098,000
|
Computes bool valued functions using int8.
|
jax/experimental/jax2tf/jax2tf.py
|
bool_to_int8
|
ho-oto/jax
|
python
|
def bool_to_int8(f, argnums):
argnums = tf.nest.flatten(argnums)
def wrapper(*args, **kwargs):
if (not any(((args[i].dtype == tf.bool) for i in argnums))):
return f(*args, **kwargs)
else:
args_cast = [(tf.cast(a, tf.int8) if (i in argnums) else a) for (i, a) in enumerate(args)]
if ('_in_avals' in kwargs):
def cast_aval(aval):
return core.ShapedArray(aval.shape, np.int8)
_in_avals_cast = [(cast_aval(aval) if (i in argnums) else aval) for (i, aval) in enumerate(kwargs['_in_avals'])]
_out_aval_cast = tf.nest.map_structure(cast_aval, kwargs['_out_aval'])
kwargs = dict(kwargs, _in_avals=_in_avals_cast, _out_aval=_out_aval_cast)
out = f(*args_cast, **kwargs)
return tf.nest.map_structure((lambda o: tf.cast(o, tf.bool)), out)
return wrapper
|
def _conv_general_dimension_numbers_proto(dimension_numbers):
'Converts a ConvDimensionNumbers to an XLA ConvolutionDimensionNumbers.'
assert isinstance(dimension_numbers, lax.ConvDimensionNumbers)
(lhs_spec, rhs_spec, out_spec) = dimension_numbers
proto = xla_data_pb2.ConvolutionDimensionNumbers()
proto.input_batch_dimension = lhs_spec[0]
proto.input_feature_dimension = lhs_spec[1]
proto.output_batch_dimension = out_spec[0]
proto.output_feature_dimension = out_spec[1]
proto.kernel_output_feature_dimension = rhs_spec[0]
proto.kernel_input_feature_dimension = rhs_spec[1]
proto.input_spatial_dimensions.extend(lhs_spec[2:])
proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
proto.output_spatial_dimensions.extend(out_spec[2:])
return proto
| 5,265,963,477,797,816,000
|
Converts a ConvDimensionNumbers to an XLA ConvolutionDimensionNumbers.
|
jax/experimental/jax2tf/jax2tf.py
|
_conv_general_dimension_numbers_proto
|
ho-oto/jax
|
python
|
def _conv_general_dimension_numbers_proto(dimension_numbers):
assert isinstance(dimension_numbers, lax.ConvDimensionNumbers)
(lhs_spec, rhs_spec, out_spec) = dimension_numbers
proto = xla_data_pb2.ConvolutionDimensionNumbers()
proto.input_batch_dimension = lhs_spec[0]
proto.input_feature_dimension = lhs_spec[1]
proto.output_batch_dimension = out_spec[0]
proto.output_feature_dimension = out_spec[1]
proto.kernel_output_feature_dimension = rhs_spec[0]
proto.kernel_input_feature_dimension = rhs_spec[1]
proto.input_spatial_dimensions.extend(lhs_spec[2:])
proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
proto.output_spatial_dimensions.extend(out_spec[2:])
return proto
|
def _precision_config_proto(precision: Optional[Tuple[(PrecisionType, PrecisionType)]]):
'Convert an integer to an XLA.PrecisionConfig.'
if (precision is None):
return None
proto = xla_data_pb2.PrecisionConfig()
proto.operand_precision.append(int(precision[0]))
proto.operand_precision.append(int(precision[1]))
return proto
| 3,985,637,154,650,195,500
|
Convert an integer to an XLA.PrecisionConfig.
|
jax/experimental/jax2tf/jax2tf.py
|
_precision_config_proto
|
ho-oto/jax
|
python
|
def _precision_config_proto(precision: Optional[Tuple[(PrecisionType, PrecisionType)]]):
if (precision is None):
return None
proto = xla_data_pb2.PrecisionConfig()
proto.operand_precision.append(int(precision[0]))
proto.operand_precision.append(int(precision[1]))
return proto
|
def _conv_general_dilated(lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation, dimension_numbers: lax.ConvDimensionNumbers, feature_group_count: int, batch_group_count: int, lhs_shape: Sequence[int], rhs_shape: Sequence[int], precision: Optional[Tuple[(PrecisionType, PrecisionType)]], preferred_element_type: Optional[DType], _in_avals: Sequence[core.AbstractValue], _out_aval: core.AbstractValue):
    """Implementation of lax.conv_general_dilated_p using XlaConv.

    Falls back to plain TF convolutions when XLA ops are disabled.  Complex
    inputs are lowered to three real convolutions (a Karatsuba-style
    decomposition: 3 convs instead of the naive 4).
    """
    out_tf_shape = _aval_to_tf_shape(_out_aval)
    if (not _enable_xla):
        return _try_tf_conv(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count, batch_group_count, preferred_element_type, out_tf_shape)
    dnums_proto = _conv_general_dimension_numbers_proto(dimension_numbers)
    precision_config_proto = _precision_config_proto(precision)
    # Batch groups are not supported by this lowering.
    assert (batch_group_count == 1)
    def gen_conv(lhs, rhs, preferred_element_type: Optional[DType]):
        # One real-valued XlaConv; the static shape is restored explicitly
        # because tfxla ops lose shape information.
        out = tfxla.conv(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, dnums_proto, feature_group_count=feature_group_count, precision_config=precision_config_proto, preferred_element_type=preferred_element_type)
        out.set_shape(out_tf_shape)
        return out
    if np.issubdtype(_in_avals[0].dtype, np.complexfloating):
        # TF has no complex convolution: split into real/imag parts and use
        # the matching real preferred element type.
        if (preferred_element_type is not None):
            assert np.issubdtype(preferred_element_type, np.complexfloating)
            preferred_float_et = (np.float64 if (preferred_element_type == np.complex128) else np.float32)
        else:
            preferred_float_et = None
        (lhs_real, lhs_imag) = (tf.math.real(lhs), tf.math.imag(lhs))
        (rhs_real, rhs_imag) = (tf.math.real(rhs), tf.math.imag(rhs))
        # Karatsuba-style combination: real = k1 - k3, imag = k1 + k2.
        k1 = gen_conv(_add(lhs_real, lhs_imag), rhs_real, preferred_float_et)
        k2 = gen_conv(lhs_real, tf.math.subtract(rhs_imag, rhs_real), preferred_float_et)
        k3 = gen_conv(lhs_imag, _add(rhs_real, rhs_imag), preferred_float_et)
        return tf.complex(tf.math.subtract(k1, k3), _add(k1, k2))
    else:
        return gen_conv(lhs, rhs, preferred_element_type)
| -7,552,919,039,765,629,000
|
Implementation of lax.conv_general_dilated_p using XlaConv.
|
jax/experimental/jax2tf/jax2tf.py
|
_conv_general_dilated
|
ho-oto/jax
|
python
|
def _conv_general_dilated(lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation, dimension_numbers: lax.ConvDimensionNumbers, feature_group_count: int, batch_group_count: int, lhs_shape: Sequence[int], rhs_shape: Sequence[int], precision: Optional[Tuple[(PrecisionType, PrecisionType)]], preferred_element_type: Optional[DType], _in_avals: Sequence[core.AbstractValue], _out_aval: core.AbstractValue):
    """Implementation of lax.conv_general_dilated_p using XlaConv.

    Falls back to plain TF convolutions when XLA ops are disabled; complex
    inputs are lowered to three real convolutions.
    """
    out_tf_shape = _aval_to_tf_shape(_out_aval)
    if (not _enable_xla):
        return _try_tf_conv(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count, batch_group_count, preferred_element_type, out_tf_shape)
    dnums_proto = _conv_general_dimension_numbers_proto(dimension_numbers)
    precision_config_proto = _precision_config_proto(precision)
    # Batch groups are not supported by this lowering.
    assert (batch_group_count == 1)
    def gen_conv(lhs, rhs, preferred_element_type: Optional[DType]):
        # One real-valued XlaConv; restore the static shape explicitly since
        # tfxla ops lose shape information.
        out = tfxla.conv(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, dnums_proto, feature_group_count=feature_group_count, precision_config=precision_config_proto, preferred_element_type=preferred_element_type)
        out.set_shape(out_tf_shape)
        return out
    if np.issubdtype(_in_avals[0].dtype, np.complexfloating):
        # TF has no complex convolution: work on real/imag parts.
        if (preferred_element_type is not None):
            assert np.issubdtype(preferred_element_type, np.complexfloating)
            preferred_float_et = (np.float64 if (preferred_element_type == np.complex128) else np.float32)
        else:
            preferred_float_et = None
        (lhs_real, lhs_imag) = (tf.math.real(lhs), tf.math.imag(lhs))
        (rhs_real, rhs_imag) = (tf.math.real(rhs), tf.math.imag(rhs))
        # Karatsuba-style combination: real = k1 - k3, imag = k1 + k2
        # (three convolutions instead of the naive four).
        k1 = gen_conv(_add(lhs_real, lhs_imag), rhs_real, preferred_float_et)
        k2 = gen_conv(lhs_real, tf.math.subtract(rhs_imag, rhs_real), preferred_float_et)
        k3 = gen_conv(lhs_imag, _add(rhs_real, rhs_imag), preferred_float_et)
        return tf.complex(tf.math.subtract(k1, k3), _add(k1, k2))
    else:
        return gen_conv(lhs, rhs, preferred_element_type)
|
def _dot_general(lhs, rhs, *, dimension_numbers, precision: Optional[Tuple[(PrecisionType, PrecisionType)]], preferred_element_type: Optional[DType], _in_avals: Sequence[core.AbstractValue], _out_aval: core.AbstractValue):
    """Implementation of lax.dot_general_p in terms of tf.linalg.einsum.

    When XLA is enabled, lowers directly to XlaDotGeneral.  Otherwise tries a
    tf.linalg.matmul fast path for the standard batched-matmul layout, and
    falls back to constructing an einsum spec for the general case.
    """
    ((lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch)) = dimension_numbers
    (lhs_ndim, rhs_ndim) = (len(lhs.shape), len(rhs.shape))
    if _enable_xla:
        # Direct lowering: encode the dimension numbers as an XLA proto.
        dnums_proto = xla_data_pb2.DotDimensionNumbers()
        dnums_proto.lhs_contracting_dimensions.extend(lhs_contracting)
        dnums_proto.rhs_contracting_dimensions.extend(rhs_contracting)
        dnums_proto.lhs_batch_dimensions.extend(lhs_batch)
        dnums_proto.rhs_batch_dimensions.extend(rhs_batch)
        precision_config_proto = _precision_config_proto(precision)
        res = tfxla.dot_general(lhs, rhs, dnums_proto, precision_config_proto, preferred_element_type=preferred_element_type)
        # tfxla ops lose static shapes; restore from the output abstract value.
        res.set_shape(_aval_to_tf_shape(_out_aval))
        return res
    # Fast path: leading batch dims, contraction over the last lhs dim and the
    # first non-batch rhs dim, with 1-2 non-batch dims per side -- the layout
    # tf.linalg.matmul handles natively.
    if ((lhs_batch == rhs_batch == tuple(range(len(lhs_batch)))) and ((lhs_ndim - rhs_ndim) in [(- 1), 0, 1]) and (1 <= (lhs_ndim - len(lhs_batch)) <= 2) and (1 <= (rhs_ndim - len(rhs_batch)) <= 2) and (lhs_contracting == ((len(lhs.shape) - 1),)) and (rhs_contracting == (len(lhs_batch),))):
        squeeze_idxs = []
        # matmul needs matrices: promote vector operands to rank 2 and
        # remember which axes to squeeze away afterwards.
        if ((lhs_ndim - len(lhs_batch)) == 1):
            lhs = tf.expand_dims(lhs, (lhs_ndim - 1))
            squeeze_idxs.append((len(lhs.shape) - 2))
        if ((rhs_ndim - len(rhs_batch)) == 1):
            rhs = tf.expand_dims(rhs, rhs_ndim)
            squeeze_idxs.append((len(rhs.shape) - 1))
        result = tf.linalg.matmul(lhs, rhs)
        if (len(squeeze_idxs) != 0):
            assert all([(result.shape[i] == 1) for i in squeeze_idxs])
            result = tf.squeeze(result, squeeze_idxs)
        return result
    # General case: build an einsum spec, assigning a fresh letter to each
    # axis and a shared letter to each contracted/batch axis pair.
    new_id = iter(string.ascii_letters)
    lhs_axis_ids = [next(new_id) for _ in lhs.shape]
    rhs_axis_ids = [next(new_id) for _ in rhs.shape]
    lhs_out_axis_ids = lhs_axis_ids[:]
    rhs_out_axis_ids = rhs_axis_ids[:]
    for (lhs_axis, rhs_axis) in zip(lhs_contracting, rhs_contracting):
        shared_id = next(new_id)
        lhs_axis_ids[lhs_axis] = shared_id
        rhs_axis_ids[rhs_axis] = shared_id
        lhs_out_axis_ids[lhs_axis] = None
        rhs_out_axis_ids[rhs_axis] = None
    batch_ids = []
    for (lhs_axis, rhs_axis) in zip(lhs_batch, rhs_batch):
        shared_id = next(new_id)
        lhs_axis_ids[lhs_axis] = shared_id
        rhs_axis_ids[rhs_axis] = shared_id
        lhs_out_axis_ids[lhs_axis] = None
        rhs_out_axis_ids[rhs_axis] = None
        batch_ids.append(shared_id)
    not_none = (lambda x: (x is not None))
    # Output axis order: batch dims first, then remaining lhs, then rhs dims.
    out_axis_ids = list(filter(not_none, ((batch_ids + lhs_out_axis_ids) + rhs_out_axis_ids)))
    assert (lhs.dtype == rhs.dtype)
    spec = '{},{}->{}'.format(''.join(lhs_axis_ids), ''.join(rhs_axis_ids), ''.join(out_axis_ids))
    return tf.linalg.einsum(spec, lhs, rhs)
| -7,824,252,544,626,182,000
|
Implementation of lax.dot_general_p in terms of tf.linalg.einsum.
|
jax/experimental/jax2tf/jax2tf.py
|
_dot_general
|
ho-oto/jax
|
python
|
def _dot_general(lhs, rhs, *, dimension_numbers, precision: Optional[Tuple[(PrecisionType, PrecisionType)]], preferred_element_type: Optional[DType], _in_avals: Sequence[core.AbstractValue], _out_aval: core.AbstractValue):
    """Implementation of lax.dot_general_p in terms of tf.linalg.einsum.

    When XLA is enabled, lowers directly to XlaDotGeneral.  Otherwise tries a
    tf.linalg.matmul fast path for the standard batched-matmul layout, and
    falls back to constructing an einsum spec for the general case.
    """
    ((lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch)) = dimension_numbers
    (lhs_ndim, rhs_ndim) = (len(lhs.shape), len(rhs.shape))
    if _enable_xla:
        # Direct lowering: encode the dimension numbers as an XLA proto.
        dnums_proto = xla_data_pb2.DotDimensionNumbers()
        dnums_proto.lhs_contracting_dimensions.extend(lhs_contracting)
        dnums_proto.rhs_contracting_dimensions.extend(rhs_contracting)
        dnums_proto.lhs_batch_dimensions.extend(lhs_batch)
        dnums_proto.rhs_batch_dimensions.extend(rhs_batch)
        precision_config_proto = _precision_config_proto(precision)
        res = tfxla.dot_general(lhs, rhs, dnums_proto, precision_config_proto, preferred_element_type=preferred_element_type)
        # tfxla ops lose static shapes; restore from the output abstract value.
        res.set_shape(_aval_to_tf_shape(_out_aval))
        return res
    # Fast path: leading batch dims, contraction over the last lhs dim and the
    # first non-batch rhs dim, with 1-2 non-batch dims per side -- the layout
    # tf.linalg.matmul handles natively.
    if ((lhs_batch == rhs_batch == tuple(range(len(lhs_batch)))) and ((lhs_ndim - rhs_ndim) in [(- 1), 0, 1]) and (1 <= (lhs_ndim - len(lhs_batch)) <= 2) and (1 <= (rhs_ndim - len(rhs_batch)) <= 2) and (lhs_contracting == ((len(lhs.shape) - 1),)) and (rhs_contracting == (len(lhs_batch),))):
        squeeze_idxs = []
        # matmul needs matrices: promote vector operands to rank 2 and
        # remember which axes to squeeze away afterwards.
        if ((lhs_ndim - len(lhs_batch)) == 1):
            lhs = tf.expand_dims(lhs, (lhs_ndim - 1))
            squeeze_idxs.append((len(lhs.shape) - 2))
        if ((rhs_ndim - len(rhs_batch)) == 1):
            rhs = tf.expand_dims(rhs, rhs_ndim)
            squeeze_idxs.append((len(rhs.shape) - 1))
        result = tf.linalg.matmul(lhs, rhs)
        if (len(squeeze_idxs) != 0):
            assert all([(result.shape[i] == 1) for i in squeeze_idxs])
            result = tf.squeeze(result, squeeze_idxs)
        return result
    # General case: build an einsum spec, assigning a fresh letter to each
    # axis and a shared letter to each contracted/batch axis pair.
    new_id = iter(string.ascii_letters)
    lhs_axis_ids = [next(new_id) for _ in lhs.shape]
    rhs_axis_ids = [next(new_id) for _ in rhs.shape]
    lhs_out_axis_ids = lhs_axis_ids[:]
    rhs_out_axis_ids = rhs_axis_ids[:]
    for (lhs_axis, rhs_axis) in zip(lhs_contracting, rhs_contracting):
        shared_id = next(new_id)
        lhs_axis_ids[lhs_axis] = shared_id
        rhs_axis_ids[rhs_axis] = shared_id
        lhs_out_axis_ids[lhs_axis] = None
        rhs_out_axis_ids[rhs_axis] = None
    batch_ids = []
    for (lhs_axis, rhs_axis) in zip(lhs_batch, rhs_batch):
        shared_id = next(new_id)
        lhs_axis_ids[lhs_axis] = shared_id
        rhs_axis_ids[rhs_axis] = shared_id
        lhs_out_axis_ids[lhs_axis] = None
        rhs_out_axis_ids[rhs_axis] = None
        batch_ids.append(shared_id)
    not_none = (lambda x: (x is not None))
    # Output axis order: batch dims first, then remaining lhs, then rhs dims.
    out_axis_ids = list(filter(not_none, ((batch_ids + lhs_out_axis_ids) + rhs_out_axis_ids)))
    assert (lhs.dtype == rhs.dtype)
    # BUG FIX: the empty-string separators had been stripped, leaving the
    # syntactically invalid `.format(.join(...))`; restore ''.join(...) as in
    # the documented copy of this function.
    spec = '{},{}->{}'.format(''.join(lhs_axis_ids), ''.join(rhs_axis_ids), ''.join(out_axis_ids))
    return tf.linalg.einsum(spec, lhs, rhs)
|
def _reduce_window(operand, init_value, *, jaxpr, consts, window_dimensions, window_strides, padding, base_dilation, window_dilation, _in_avals, _out_aval):
    """TensorFlow implementation of reduce_window.

    Args:
      operand: N dimensional array containing elements of type T.
      init_value: starting value of the reduction.
      jaxpr: the jaxpr corresponding to the reduction function.
      consts: the constants associated with jaxpr.
      window_dimensions: array of integers for window dimension values.
      window_strides: array of integers for window stride values.
      padding: array of pairs of integers for padding values.
      base_dilation: array of integers for base dilation values.
      window_dilation: array of integers for window dilation values.

    Returns:
      The reduced operand.
    """
    assert (len(consts) == 0), 'Reduction computation cannot have constants'
    if (not _enable_xla):
        raise _xla_disabled_error('reduce_window')
    def reducer(arg1: TfVal, arg2: TfVal) -> TfVal:
        # Interpret the reduction jaxpr on TF values for each element pair.
        closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
        (res,) = _interpret_jaxpr(closed_jaxpr, arg1, arg2)
        return res
    return _common_reduce_window(operand, init_value, reducer, window_dimensions, window_strides, padding, base_dilation, window_dilation, _in_avals, _out_aval)
| 5,077,739,957,849,369,000
|
TensorFlow implementation of reduce_window.
Args:
operand: N dimensional array containing elements of type T
init_value: starting value of the reduction
jaxpr: the jaxpr corresponding to the reduction function
consts: the constants associated with jaxpr.
window_dimensions: array of integers for window dimension values
window_strides: array of integers for window stride values
padding: array of pairs of integers for padding values
base_dilation: array of integers for base dilation values
window_dilation: array of integers for window dilation values
Returns:
The reduced operand.
|
jax/experimental/jax2tf/jax2tf.py
|
_reduce_window
|
ho-oto/jax
|
python
|
def _reduce_window(operand, init_value, *, jaxpr, consts, window_dimensions, window_strides, padding, base_dilation, window_dilation, _in_avals, _out_aval):
    """TensorFlow implementation of reduce_window.

    The reduction function arrives as a jaxpr (plus its constants, which must
    be empty); it is re-interpreted on TF values for every pair of elements
    being combined, then handed to the shared reduce-window lowering.
    """
    assert (len(consts) == 0), 'Reduction computation cannot have constants'
    if not _enable_xla:
        raise _xla_disabled_error('reduce_window')

    def reducer(arg1: TfVal, arg2: TfVal) -> TfVal:
        # Re-run the reduction jaxpr on the two TF values.
        (result,) = _interpret_jaxpr(core.ClosedJaxpr(jaxpr, consts), arg1, arg2)
        return result

    return _common_reduce_window(operand, init_value, reducer,
                                 window_dimensions, window_strides, padding,
                                 base_dilation, window_dilation,
                                 _in_avals, _out_aval)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.