code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def to_wbem_uri(self, format='standard'):
# pylint: disable=redefined-builtin
"""
Return the (untyped) WBEM URI string of this CIM instance path.
The returned WBEM URI contains its components as follows:
* it does not contain a namespace type (URI scheme).
* it contains an authority (host) component according to the
:attr:`~pywbem.CIMInstanceName.host` attribute, if that is not
`None`. Otherwise, it does not contain the authority component.
* it contains a namespace component according to the
:attr:`~pywbem.CIMInstanceName.namespace` attribute, if that is not
`None`. Otherwise, it does not contain the namespace component.
* it contains a class name component according to the
:attr:`~pywbem.CIMInstanceName.classname` attribute.
* it contains keybindings according to the
:attr:`~pywbem.CIMInstanceName.keybindings` attribute, with the
order of keybindings preserved, and the lexical case of keybinding
names preserved (except when using the format "canonical").
Note that when you do not want some of these components to show up
in the resulting WBEM URI string, you can set them to `None` before
calling this method.
Except when using the format "canonical", this method should not be
used to compare instance paths for equality: :term:`DSP0004` defines
defines several components of an instance path to be compared case
insensitively, including the names of keybindings. In addition, it
defines that the order of keybindings in instance paths does not matter
for the comparison. All WBEM URI formats returned by this method except
for the format "canonical" return a WBEM URI string that preserves the
order of keybindings (relative to how the keybindings were first added
to the :class:`~pywbem.CIMInstanceName` object) and that preserves the
lexical case of any components. Therefore, two instance paths that are
considered equal according to :term:`DSP0004` may not have equal WBEM
URI strings as returned by this method.
Instead, equality of instance paths represented by
:class:`~pywbem.CIMInstanceName` objects should be determined by using
the ``==`` operator, which performs the comparison conformant to
:term:`DSP0004`. If you have WBEM URI strings without the
corresponding :class:`~pywbem.CIMInstanceName` object, such an object
can be created by using the static method
:meth:`~pywbem.CIMInstanceName.from_wbem_uri`.
Parameters:
format (:term:`string`): Format for the generated WBEM URI string,
using one of the following values:
* ``"standard"`` - Standard format that is conformant to untyped
WBEM URIs for instance paths defined in :term:`DSP0207`.
* ``"canonical"`` - Like ``"standard"``, except that the following
items have been converted to lower case: host, namespace,
classname, and the names of any keybindings, and except that the
order of keybindings is in lexical order of the (lower-cased)
keybinding names.
This format guarantees that two instance paths that are
considered equal according to :term:`DSP0004` result in equal
WBEM URI strings. Therefore, the returned WBEM URI is suitable to
be used as a key in dictionaries of CIM instances.
* ``"cimobject"`` - Format for the `CIMObject` header field in
CIM-XML messages for representing instance paths (used
internally, see :term:`DSP0200`).
* ``"historical"`` - Historical format for WBEM URIs (used by
:meth:`~pywbem.CIMInstanceName.__str__`; should not be used by
new code). The historical format has the following differences to
the standard format:
- If the host component is not present, the slash after the
host is also omitted. In the standard format, that slash
is always present.
- If the namespace component is not present, the colon after the
namespace is also omitted. In the standard format, that colon
is always present.
Keybindings that are references use the specified format
recursively.
Examples:
* With host and namespace, standard format::
//ACME.com/cimv2/Test:CIM_RegisteredProfile.InstanceID="Acme.1"
* With host and namespace, canonical format::
//acme.com/cimv2/test:cim_registeredprofile.instanceid="Acme.1"
* Without host but with namespace, standard format::
/cimv2/Test:CIM_RegisteredProfile.InstanceID="Acme.1"
* Without host but with namespace, canonical format::
/cimv2/test:cim_registeredprofile.instanceid="Acme.1"
* Without host and without namespace, standard format::
/:CIM_RegisteredProfile.InstanceID="Acme.1"
Returns:
:term:`unicode string`: Untyped WBEM URI of the CIM instance path,
in the specified format.
Raises:
TypeError: Invalid type in keybindings
ValueError: Invalid format
"""
ret = []
def case(str_):
"""Return the string in the correct lexical case for the format."""
if format == 'canonical':
str_ = str_.lower()
return str_
def case_sorted(keys):
"""Return the keys in the correct order for the format."""
if format == 'canonical':
case_keys = [case(k) for k in keys]
keys = sorted(case_keys)
return keys
if format not in ('standard', 'canonical', 'cimobject', 'historical'):
raise ValueError(
_format("Invalid format argument: {0}", format))
if self.host is not None and format != 'cimobject':
# The CIMObject format assumes there is no host component
ret.append('//')
ret.append(case(self.host))
if self.host is not None or format not in ('cimobject', 'historical'):
ret.append('/')
if self.namespace is not None:
ret.append(case(self.namespace))
if self.namespace is not None or format != 'historical':
ret.append(':')
ret.append(case(self.classname))
ret.append('.')
for key in case_sorted(self.keybindings.iterkeys()):
value = self.keybindings[key]
ret.append(key)
ret.append('=')
if isinstance(value, six.string_types):
# string, char16
ret.append('"')
ret.append(value.
replace('\\', '\\\\').
replace('"', '\\"'))
ret.append('"')
elif isinstance(value, bool):
# boolean
# Note that in Python a bool is an int, so test for bool first
ret.append(str(value).upper())
elif isinstance(value, (CIMFloat, float)):
# realNN
# Since Python 2.7 and Python 3.1, repr() prints float numbers
# with the shortest representation that does not change its
# value. When needed, it shows up to 17 significant digits,
# which is the precision needed to round-trip double precision
# IEE-754 floating point numbers between decimal and binary
# without loss.
ret.append(repr(value))
elif isinstance(value, (CIMInt, int, _Longint)):
# intNN
ret.append(str(value))
elif isinstance(value, CIMInstanceName):
# reference
ret.append('"')
ret.append(value.to_wbem_uri(format=format).
replace('\\', '\\\\').
replace('"', '\\"'))
ret.append('"')
elif isinstance(value, CIMDateTime):
# datetime
ret.append('"')
ret.append(str(value))
ret.append('"')
else:
raise TypeError(
_format("Invalid type {0} in keybinding value: {1!A}={2!A}",
type(value), key, value))
ret.append(',')
del ret[-1]
return _ensure_unicode(''.join(ret)) | def function[to_wbem_uri, parameter[self, format]]:
constant[
Return the (untyped) WBEM URI string of this CIM instance path.
The returned WBEM URI contains its components as follows:
* it does not contain a namespace type (URI scheme).
* it contains an authority (host) component according to the
:attr:`~pywbem.CIMInstanceName.host` attribute, if that is not
`None`. Otherwise, it does not contain the authority component.
* it contains a namespace component according to the
:attr:`~pywbem.CIMInstanceName.namespace` attribute, if that is not
`None`. Otherwise, it does not contain the namespace component.
* it contains a class name component according to the
:attr:`~pywbem.CIMInstanceName.classname` attribute.
* it contains keybindings according to the
:attr:`~pywbem.CIMInstanceName.keybindings` attribute, with the
order of keybindings preserved, and the lexical case of keybinding
names preserved (except when using the format "canonical").
Note that when you do not want some of these components to show up
in the resulting WBEM URI string, you can set them to `None` before
calling this method.
Except when using the format "canonical", this method should not be
used to compare instance paths for equality: :term:`DSP0004` defines
defines several components of an instance path to be compared case
insensitively, including the names of keybindings. In addition, it
defines that the order of keybindings in instance paths does not matter
for the comparison. All WBEM URI formats returned by this method except
for the format "canonical" return a WBEM URI string that preserves the
order of keybindings (relative to how the keybindings were first added
to the :class:`~pywbem.CIMInstanceName` object) and that preserves the
lexical case of any components. Therefore, two instance paths that are
considered equal according to :term:`DSP0004` may not have equal WBEM
URI strings as returned by this method.
Instead, equality of instance paths represented by
:class:`~pywbem.CIMInstanceName` objects should be determined by using
the ``==`` operator, which performs the comparison conformant to
:term:`DSP0004`. If you have WBEM URI strings without the
corresponding :class:`~pywbem.CIMInstanceName` object, such an object
can be created by using the static method
:meth:`~pywbem.CIMInstanceName.from_wbem_uri`.
Parameters:
format (:term:`string`): Format for the generated WBEM URI string,
using one of the following values:
* ``"standard"`` - Standard format that is conformant to untyped
WBEM URIs for instance paths defined in :term:`DSP0207`.
* ``"canonical"`` - Like ``"standard"``, except that the following
items have been converted to lower case: host, namespace,
classname, and the names of any keybindings, and except that the
order of keybindings is in lexical order of the (lower-cased)
keybinding names.
This format guarantees that two instance paths that are
considered equal according to :term:`DSP0004` result in equal
WBEM URI strings. Therefore, the returned WBEM URI is suitable to
be used as a key in dictionaries of CIM instances.
* ``"cimobject"`` - Format for the `CIMObject` header field in
CIM-XML messages for representing instance paths (used
internally, see :term:`DSP0200`).
* ``"historical"`` - Historical format for WBEM URIs (used by
:meth:`~pywbem.CIMInstanceName.__str__`; should not be used by
new code). The historical format has the following differences to
the standard format:
- If the host component is not present, the slash after the
host is also omitted. In the standard format, that slash
is always present.
- If the namespace component is not present, the colon after the
namespace is also omitted. In the standard format, that colon
is always present.
Keybindings that are references use the specified format
recursively.
Examples:
* With host and namespace, standard format::
//ACME.com/cimv2/Test:CIM_RegisteredProfile.InstanceID="Acme.1"
* With host and namespace, canonical format::
//acme.com/cimv2/test:cim_registeredprofile.instanceid="Acme.1"
* Without host but with namespace, standard format::
/cimv2/Test:CIM_RegisteredProfile.InstanceID="Acme.1"
* Without host but with namespace, canonical format::
/cimv2/test:cim_registeredprofile.instanceid="Acme.1"
* Without host and without namespace, standard format::
/:CIM_RegisteredProfile.InstanceID="Acme.1"
Returns:
:term:`unicode string`: Untyped WBEM URI of the CIM instance path,
in the specified format.
Raises:
TypeError: Invalid type in keybindings
ValueError: Invalid format
]
variable[ret] assign[=] list[[]]
def function[case, parameter[str_]]:
constant[Return the string in the correct lexical case for the format.]
if compare[name[format] equal[==] constant[canonical]] begin[:]
variable[str_] assign[=] call[name[str_].lower, parameter[]]
return[name[str_]]
def function[case_sorted, parameter[keys]]:
constant[Return the keys in the correct order for the format.]
if compare[name[format] equal[==] constant[canonical]] begin[:]
variable[case_keys] assign[=] <ast.ListComp object at 0x7da18f09cb20>
variable[keys] assign[=] call[name[sorted], parameter[name[case_keys]]]
return[name[keys]]
if compare[name[format] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da20e9600a0>, <ast.Constant object at 0x7da20e9623e0>, <ast.Constant object at 0x7da20e9601c0>, <ast.Constant object at 0x7da20e963070>]]] begin[:]
<ast.Raise object at 0x7da20e9626e0>
if <ast.BoolOp object at 0x7da20e963a90> begin[:]
call[name[ret].append, parameter[constant[//]]]
call[name[ret].append, parameter[call[name[case], parameter[name[self].host]]]]
if <ast.BoolOp object at 0x7da20e9621a0> begin[:]
call[name[ret].append, parameter[constant[/]]]
if compare[name[self].namespace is_not constant[None]] begin[:]
call[name[ret].append, parameter[call[name[case], parameter[name[self].namespace]]]]
if <ast.BoolOp object at 0x7da20e961750> begin[:]
call[name[ret].append, parameter[constant[:]]]
call[name[ret].append, parameter[call[name[case], parameter[name[self].classname]]]]
call[name[ret].append, parameter[constant[.]]]
for taget[name[key]] in starred[call[name[case_sorted], parameter[call[name[self].keybindings.iterkeys, parameter[]]]]] begin[:]
variable[value] assign[=] call[name[self].keybindings][name[key]]
call[name[ret].append, parameter[name[key]]]
call[name[ret].append, parameter[constant[=]]]
if call[name[isinstance], parameter[name[value], name[six].string_types]] begin[:]
call[name[ret].append, parameter[constant["]]]
call[name[ret].append, parameter[call[call[name[value].replace, parameter[constant[\], constant[\\]]].replace, parameter[constant["], constant[\"]]]]]
call[name[ret].append, parameter[constant["]]]
call[name[ret].append, parameter[constant[,]]]
<ast.Delete object at 0x7da20c76f100>
return[call[name[_ensure_unicode], parameter[call[constant[].join, parameter[name[ret]]]]]] | keyword[def] identifier[to_wbem_uri] ( identifier[self] , identifier[format] = literal[string] ):
literal[string]
identifier[ret] =[]
keyword[def] identifier[case] ( identifier[str_] ):
literal[string]
keyword[if] identifier[format] == literal[string] :
identifier[str_] = identifier[str_] . identifier[lower] ()
keyword[return] identifier[str_]
keyword[def] identifier[case_sorted] ( identifier[keys] ):
literal[string]
keyword[if] identifier[format] == literal[string] :
identifier[case_keys] =[ identifier[case] ( identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[keys] ]
identifier[keys] = identifier[sorted] ( identifier[case_keys] )
keyword[return] identifier[keys]
keyword[if] identifier[format] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] (
identifier[_format] ( literal[string] , identifier[format] ))
keyword[if] identifier[self] . identifier[host] keyword[is] keyword[not] keyword[None] keyword[and] identifier[format] != literal[string] :
identifier[ret] . identifier[append] ( literal[string] )
identifier[ret] . identifier[append] ( identifier[case] ( identifier[self] . identifier[host] ))
keyword[if] identifier[self] . identifier[host] keyword[is] keyword[not] keyword[None] keyword[or] identifier[format] keyword[not] keyword[in] ( literal[string] , literal[string] ):
identifier[ret] . identifier[append] ( literal[string] )
keyword[if] identifier[self] . identifier[namespace] keyword[is] keyword[not] keyword[None] :
identifier[ret] . identifier[append] ( identifier[case] ( identifier[self] . identifier[namespace] ))
keyword[if] identifier[self] . identifier[namespace] keyword[is] keyword[not] keyword[None] keyword[or] identifier[format] != literal[string] :
identifier[ret] . identifier[append] ( literal[string] )
identifier[ret] . identifier[append] ( identifier[case] ( identifier[self] . identifier[classname] ))
identifier[ret] . identifier[append] ( literal[string] )
keyword[for] identifier[key] keyword[in] identifier[case_sorted] ( identifier[self] . identifier[keybindings] . identifier[iterkeys] ()):
identifier[value] = identifier[self] . identifier[keybindings] [ identifier[key] ]
identifier[ret] . identifier[append] ( identifier[key] )
identifier[ret] . identifier[append] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[string_types] ):
identifier[ret] . identifier[append] ( literal[string] )
identifier[ret] . identifier[append] ( identifier[value] .
identifier[replace] ( literal[string] , literal[string] ).
identifier[replace] ( literal[string] , literal[string] ))
identifier[ret] . identifier[append] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[bool] ):
identifier[ret] . identifier[append] ( identifier[str] ( identifier[value] ). identifier[upper] ())
keyword[elif] identifier[isinstance] ( identifier[value] ,( identifier[CIMFloat] , identifier[float] )):
identifier[ret] . identifier[append] ( identifier[repr] ( identifier[value] ))
keyword[elif] identifier[isinstance] ( identifier[value] ,( identifier[CIMInt] , identifier[int] , identifier[_Longint] )):
identifier[ret] . identifier[append] ( identifier[str] ( identifier[value] ))
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[CIMInstanceName] ):
identifier[ret] . identifier[append] ( literal[string] )
identifier[ret] . identifier[append] ( identifier[value] . identifier[to_wbem_uri] ( identifier[format] = identifier[format] ).
identifier[replace] ( literal[string] , literal[string] ).
identifier[replace] ( literal[string] , literal[string] ))
identifier[ret] . identifier[append] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[CIMDateTime] ):
identifier[ret] . identifier[append] ( literal[string] )
identifier[ret] . identifier[append] ( identifier[str] ( identifier[value] ))
identifier[ret] . identifier[append] ( literal[string] )
keyword[else] :
keyword[raise] identifier[TypeError] (
identifier[_format] ( literal[string] ,
identifier[type] ( identifier[value] ), identifier[key] , identifier[value] ))
identifier[ret] . identifier[append] ( literal[string] )
keyword[del] identifier[ret] [- literal[int] ]
keyword[return] identifier[_ensure_unicode] ( literal[string] . identifier[join] ( identifier[ret] )) | def to_wbem_uri(self, format='standard'):
# pylint: disable=redefined-builtin
'\n Return the (untyped) WBEM URI string of this CIM instance path.\n\n The returned WBEM URI contains its components as follows:\n\n * it does not contain a namespace type (URI scheme).\n * it contains an authority (host) component according to the\n :attr:`~pywbem.CIMInstanceName.host` attribute, if that is not\n `None`. Otherwise, it does not contain the authority component.\n * it contains a namespace component according to the\n :attr:`~pywbem.CIMInstanceName.namespace` attribute, if that is not\n `None`. Otherwise, it does not contain the namespace component.\n * it contains a class name component according to the\n :attr:`~pywbem.CIMInstanceName.classname` attribute.\n * it contains keybindings according to the\n :attr:`~pywbem.CIMInstanceName.keybindings` attribute, with the\n order of keybindings preserved, and the lexical case of keybinding\n names preserved (except when using the format "canonical").\n\n Note that when you do not want some of these components to show up\n in the resulting WBEM URI string, you can set them to `None` before\n calling this method.\n\n Except when using the format "canonical", this method should not be\n used to compare instance paths for equality: :term:`DSP0004` defines\n defines several components of an instance path to be compared case\n insensitively, including the names of keybindings. In addition, it\n defines that the order of keybindings in instance paths does not matter\n for the comparison. All WBEM URI formats returned by this method except\n for the format "canonical" return a WBEM URI string that preserves the\n order of keybindings (relative to how the keybindings were first added\n to the :class:`~pywbem.CIMInstanceName` object) and that preserves the\n lexical case of any components. 
Therefore, two instance paths that are\n considered equal according to :term:`DSP0004` may not have equal WBEM\n URI strings as returned by this method.\n\n Instead, equality of instance paths represented by\n :class:`~pywbem.CIMInstanceName` objects should be determined by using\n the ``==`` operator, which performs the comparison conformant to\n :term:`DSP0004`. If you have WBEM URI strings without the\n corresponding :class:`~pywbem.CIMInstanceName` object, such an object\n can be created by using the static method\n :meth:`~pywbem.CIMInstanceName.from_wbem_uri`.\n\n Parameters:\n\n format (:term:`string`): Format for the generated WBEM URI string,\n using one of the following values:\n\n * ``"standard"`` - Standard format that is conformant to untyped\n WBEM URIs for instance paths defined in :term:`DSP0207`.\n\n * ``"canonical"`` - Like ``"standard"``, except that the following\n items have been converted to lower case: host, namespace,\n classname, and the names of any keybindings, and except that the\n order of keybindings is in lexical order of the (lower-cased)\n keybinding names.\n This format guarantees that two instance paths that are\n considered equal according to :term:`DSP0004` result in equal\n WBEM URI strings. Therefore, the returned WBEM URI is suitable to\n be used as a key in dictionaries of CIM instances.\n\n * ``"cimobject"`` - Format for the `CIMObject` header field in\n CIM-XML messages for representing instance paths (used\n internally, see :term:`DSP0200`).\n\n * ``"historical"`` - Historical format for WBEM URIs (used by\n :meth:`~pywbem.CIMInstanceName.__str__`; should not be used by\n new code). The historical format has the following differences to\n the standard format:\n\n - If the host component is not present, the slash after the\n host is also omitted. In the standard format, that slash\n is always present.\n\n - If the namespace component is not present, the colon after the\n namespace is also omitted. 
In the standard format, that colon\n is always present.\n\n Keybindings that are references use the specified format\n recursively.\n\n Examples:\n\n * With host and namespace, standard format::\n\n //ACME.com/cimv2/Test:CIM_RegisteredProfile.InstanceID="Acme.1"\n\n * With host and namespace, canonical format::\n\n //acme.com/cimv2/test:cim_registeredprofile.instanceid="Acme.1"\n\n * Without host but with namespace, standard format::\n\n /cimv2/Test:CIM_RegisteredProfile.InstanceID="Acme.1"\n\n * Without host but with namespace, canonical format::\n\n /cimv2/test:cim_registeredprofile.instanceid="Acme.1"\n\n * Without host and without namespace, standard format::\n\n /:CIM_RegisteredProfile.InstanceID="Acme.1"\n\n Returns:\n\n :term:`unicode string`: Untyped WBEM URI of the CIM instance path,\n in the specified format.\n\n Raises:\n\n TypeError: Invalid type in keybindings\n ValueError: Invalid format\n '
ret = []
def case(str_):
"""Return the string in the correct lexical case for the format."""
if format == 'canonical':
str_ = str_.lower() # depends on [control=['if'], data=[]]
return str_
def case_sorted(keys):
"""Return the keys in the correct order for the format."""
if format == 'canonical':
case_keys = [case(k) for k in keys]
keys = sorted(case_keys) # depends on [control=['if'], data=[]]
return keys
if format not in ('standard', 'canonical', 'cimobject', 'historical'):
raise ValueError(_format('Invalid format argument: {0}', format)) # depends on [control=['if'], data=['format']]
if self.host is not None and format != 'cimobject':
# The CIMObject format assumes there is no host component
ret.append('//')
ret.append(case(self.host)) # depends on [control=['if'], data=[]]
if self.host is not None or format not in ('cimobject', 'historical'):
ret.append('/') # depends on [control=['if'], data=[]]
if self.namespace is not None:
ret.append(case(self.namespace)) # depends on [control=['if'], data=[]]
if self.namespace is not None or format != 'historical':
ret.append(':') # depends on [control=['if'], data=[]]
ret.append(case(self.classname))
ret.append('.')
for key in case_sorted(self.keybindings.iterkeys()):
value = self.keybindings[key]
ret.append(key)
ret.append('=')
if isinstance(value, six.string_types):
# string, char16
ret.append('"')
ret.append(value.replace('\\', '\\\\').replace('"', '\\"'))
ret.append('"') # depends on [control=['if'], data=[]]
elif isinstance(value, bool):
# boolean
# Note that in Python a bool is an int, so test for bool first
ret.append(str(value).upper()) # depends on [control=['if'], data=[]]
elif isinstance(value, (CIMFloat, float)):
# realNN
# Since Python 2.7 and Python 3.1, repr() prints float numbers
# with the shortest representation that does not change its
# value. When needed, it shows up to 17 significant digits,
# which is the precision needed to round-trip double precision
# IEE-754 floating point numbers between decimal and binary
# without loss.
ret.append(repr(value)) # depends on [control=['if'], data=[]]
elif isinstance(value, (CIMInt, int, _Longint)):
# intNN
ret.append(str(value)) # depends on [control=['if'], data=[]]
elif isinstance(value, CIMInstanceName):
# reference
ret.append('"')
ret.append(value.to_wbem_uri(format=format).replace('\\', '\\\\').replace('"', '\\"'))
ret.append('"') # depends on [control=['if'], data=[]]
elif isinstance(value, CIMDateTime):
# datetime
ret.append('"')
ret.append(str(value))
ret.append('"') # depends on [control=['if'], data=[]]
else:
raise TypeError(_format('Invalid type {0} in keybinding value: {1!A}={2!A}', type(value), key, value))
ret.append(',') # depends on [control=['for'], data=['key']]
del ret[-1]
return _ensure_unicode(''.join(ret)) |
def describe_instances(self, *instance_ids):
"""Describe current instances."""
instances = {}
for pos, instance_id in enumerate(instance_ids):
instances["InstanceId.%d" % (pos + 1)] = instance_id
query = self.query_factory(
action="DescribeInstances", creds=self.creds,
endpoint=self.endpoint, other_params=instances)
d = query.submit()
return d.addCallback(self.parser.describe_instances) | def function[describe_instances, parameter[self]]:
constant[Describe current instances.]
variable[instances] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c6e4250>, <ast.Name object at 0x7da20c6e6cb0>]]] in starred[call[name[enumerate], parameter[name[instance_ids]]]] begin[:]
call[name[instances]][binary_operation[constant[InstanceId.%d] <ast.Mod object at 0x7da2590d6920> binary_operation[name[pos] + constant[1]]]] assign[=] name[instance_id]
variable[query] assign[=] call[name[self].query_factory, parameter[]]
variable[d] assign[=] call[name[query].submit, parameter[]]
return[call[name[d].addCallback, parameter[name[self].parser.describe_instances]]] | keyword[def] identifier[describe_instances] ( identifier[self] ,* identifier[instance_ids] ):
literal[string]
identifier[instances] ={}
keyword[for] identifier[pos] , identifier[instance_id] keyword[in] identifier[enumerate] ( identifier[instance_ids] ):
identifier[instances] [ literal[string] %( identifier[pos] + literal[int] )]= identifier[instance_id]
identifier[query] = identifier[self] . identifier[query_factory] (
identifier[action] = literal[string] , identifier[creds] = identifier[self] . identifier[creds] ,
identifier[endpoint] = identifier[self] . identifier[endpoint] , identifier[other_params] = identifier[instances] )
identifier[d] = identifier[query] . identifier[submit] ()
keyword[return] identifier[d] . identifier[addCallback] ( identifier[self] . identifier[parser] . identifier[describe_instances] ) | def describe_instances(self, *instance_ids):
"""Describe current instances."""
instances = {}
for (pos, instance_id) in enumerate(instance_ids):
instances['InstanceId.%d' % (pos + 1)] = instance_id # depends on [control=['for'], data=[]]
query = self.query_factory(action='DescribeInstances', creds=self.creds, endpoint=self.endpoint, other_params=instances)
d = query.submit()
return d.addCallback(self.parser.describe_instances) |
def get(self):
"""
*get the unit_conversion object*
**Return:**
- ``unit_conversion``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
"""
self.log.info('starting the ``get`` method')
unit_conversion = None
self.log.info('completed the ``get`` method')
return unit_conversion | def function[get, parameter[self]]:
constant[
*get the unit_conversion object*
**Return:**
- ``unit_conversion``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
]
call[name[self].log.info, parameter[constant[starting the ``get`` method]]]
variable[unit_conversion] assign[=] constant[None]
call[name[self].log.info, parameter[constant[completed the ``get`` method]]]
return[name[unit_conversion]] | keyword[def] identifier[get] ( identifier[self] ):
literal[string]
identifier[self] . identifier[log] . identifier[info] ( literal[string] )
identifier[unit_conversion] = keyword[None]
identifier[self] . identifier[log] . identifier[info] ( literal[string] )
keyword[return] identifier[unit_conversion] | def get(self):
"""
*get the unit_conversion object*
**Return:**
- ``unit_conversion``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
"""
self.log.info('starting the ``get`` method')
unit_conversion = None
self.log.info('completed the ``get`` method')
return unit_conversion |
def packages(self):
"""
Show all packages
"""
pattern = re.compile(r'package:(/[^=]+\.apk)=([^\s]+)')
packages = []
for line in self.shell('pm', 'list', 'packages', '-f').splitlines():
m = pattern.match(line)
if not m:
continue
path, name = m.group(1), m.group(2)
packages.append(self.Package(name, path))
return packages | def function[packages, parameter[self]]:
constant[
Show all packages
]
variable[pattern] assign[=] call[name[re].compile, parameter[constant[package:(/[^=]+\.apk)=([^\s]+)]]]
variable[packages] assign[=] list[[]]
for taget[name[line]] in starred[call[call[name[self].shell, parameter[constant[pm], constant[list], constant[packages], constant[-f]]].splitlines, parameter[]]] begin[:]
variable[m] assign[=] call[name[pattern].match, parameter[name[line]]]
if <ast.UnaryOp object at 0x7da204564e50> begin[:]
continue
<ast.Tuple object at 0x7da204565ea0> assign[=] tuple[[<ast.Call object at 0x7da2045651e0>, <ast.Call object at 0x7da204566bf0>]]
call[name[packages].append, parameter[call[name[self].Package, parameter[name[name], name[path]]]]]
return[name[packages]] | keyword[def] identifier[packages] ( identifier[self] ):
literal[string]
identifier[pattern] = identifier[re] . identifier[compile] ( literal[string] )
identifier[packages] =[]
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[shell] ( literal[string] , literal[string] , literal[string] , literal[string] ). identifier[splitlines] ():
identifier[m] = identifier[pattern] . identifier[match] ( identifier[line] )
keyword[if] keyword[not] identifier[m] :
keyword[continue]
identifier[path] , identifier[name] = identifier[m] . identifier[group] ( literal[int] ), identifier[m] . identifier[group] ( literal[int] )
identifier[packages] . identifier[append] ( identifier[self] . identifier[Package] ( identifier[name] , identifier[path] ))
keyword[return] identifier[packages] | def packages(self):
"""
Show all packages
"""
pattern = re.compile('package:(/[^=]+\\.apk)=([^\\s]+)')
packages = []
for line in self.shell('pm', 'list', 'packages', '-f').splitlines():
m = pattern.match(line)
if not m:
continue # depends on [control=['if'], data=[]]
(path, name) = (m.group(1), m.group(2))
packages.append(self.Package(name, path)) # depends on [control=['for'], data=['line']]
return packages |
def eager_partial_regardless(self, fn, *a, **kw):
    """Like `eager_partial`, but also handles un-annotated callables.

    Annotated callables are delegated to `eager_partial`; anything else
    falls back to a plain `functools.partial` binding.
    """
    if not self.has_annotations(fn):
        # Nothing to resolve eagerly; an ordinary partial suffices.
        return functools.partial(fn, *a, **kw)
    return self.eager_partial(fn, *a, **kw)
constant[Like `eager_partial`, but applies if callable is not annotated.]
if call[name[self].has_annotations, parameter[name[fn]]] begin[:]
return[call[name[self].eager_partial, parameter[name[fn], <ast.Starred object at 0x7da20c6c41c0>]]]
return[call[name[functools].partial, parameter[name[fn], <ast.Starred object at 0x7da18f58cca0>]]] | keyword[def] identifier[eager_partial_regardless] ( identifier[self] , identifier[fn] ,* identifier[a] ,** identifier[kw] ):
literal[string]
keyword[if] identifier[self] . identifier[has_annotations] ( identifier[fn] ):
keyword[return] identifier[self] . identifier[eager_partial] ( identifier[fn] ,* identifier[a] ,** identifier[kw] )
keyword[return] identifier[functools] . identifier[partial] ( identifier[fn] ,* identifier[a] ,** identifier[kw] ) | def eager_partial_regardless(self, fn, *a, **kw):
"""Like `eager_partial`, but applies if callable is not annotated."""
if self.has_annotations(fn):
return self.eager_partial(fn, *a, **kw) # depends on [control=['if'], data=[]]
return functools.partial(fn, *a, **kw) |
def imagej_shape(shape, rgb=None):
    """Return shape normalized to 6D ImageJ hyperstack TZCYXS.

    Parameters:
    shape -- sequence of ints; the image data shape (2 to 6 dimensional).
    rgb -- whether the last axis holds RGB(A) samples; autodetected from
        the shape when None.

    Raise ValueError if not a valid ImageJ hyperstack shape.

    >>> imagej_shape((2, 3, 4, 5, 3), False)
    (2, 3, 4, 5, 3, 1)
    """
    shape = tuple(int(i) for i in shape)
    ndim = len(shape)
    # BUG FIX: the original test `1 > ndim > 6` could never be true
    # (no integer is both < 1 and > 6), so invalid ranks slipped through
    # and the "2 to 6 dimensional" check never fired.
    if ndim < 2 or ndim > 6:
        raise ValueError('invalid ImageJ hyperstack: not 2 to 6 dimensional')
    if rgb is None:
        # Autodetect RGB: a trailing axis of 3 or 4 samples on a >2-D image.
        rgb = shape[-1] in (3, 4) and ndim > 2
    if rgb and shape[-1] not in (3, 4):
        raise ValueError('invalid ImageJ hyperstack: not a RGB image')
    if not rgb and ndim == 6 and shape[-1] != 1:
        raise ValueError('invalid ImageJ hyperstack: not a non-RGB image')
    if rgb or shape[-1] == 1:
        # Samples axis already present (RGB) or an explicit singleton:
        # pad leading axes only.
        return (1, ) * (6 - ndim) + shape
    # Pad leading axes and append a singleton samples (S) axis.
    return (1, ) * (5 - ndim) + shape + (1,)
constant[Return shape normalized to 6D ImageJ hyperstack TZCYXS.
Raise ValueError if not a valid ImageJ hyperstack shape.
>>> imagej_shape((2, 3, 4, 5, 3), False)
(2, 3, 4, 5, 3, 1)
]
variable[shape] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b19d31f0>]]
variable[ndim] assign[=] call[name[len], parameter[name[shape]]]
if compare[constant[1] greater[>] name[ndim]] begin[:]
<ast.Raise object at 0x7da1b19d3850>
if compare[name[rgb] is constant[None]] begin[:]
variable[rgb] assign[=] <ast.BoolOp object at 0x7da1b19d20b0>
if <ast.BoolOp object at 0x7da20e9b0400> begin[:]
<ast.Raise object at 0x7da20e9b1060>
if <ast.BoolOp object at 0x7da20e9b2260> begin[:]
<ast.Raise object at 0x7da20e9b1870>
if <ast.BoolOp object at 0x7da20e9b2950> begin[:]
return[binary_operation[binary_operation[tuple[[<ast.Constant object at 0x7da20e9b1ea0>]] * binary_operation[constant[6] - name[ndim]]] + name[shape]]]
return[binary_operation[binary_operation[binary_operation[tuple[[<ast.Constant object at 0x7da20e9b2200>]] * binary_operation[constant[5] - name[ndim]]] + name[shape]] + tuple[[<ast.Constant object at 0x7da20e9b1720>]]]] | keyword[def] identifier[imagej_shape] ( identifier[shape] , identifier[rgb] = keyword[None] ):
literal[string]
identifier[shape] = identifier[tuple] ( identifier[int] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[shape] )
identifier[ndim] = identifier[len] ( identifier[shape] )
keyword[if] literal[int] > identifier[ndim] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[rgb] keyword[is] keyword[None] :
identifier[rgb] = identifier[shape] [- literal[int] ] keyword[in] ( literal[int] , literal[int] ) keyword[and] identifier[ndim] > literal[int]
keyword[if] identifier[rgb] keyword[and] identifier[shape] [- literal[int] ] keyword[not] keyword[in] ( literal[int] , literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[rgb] keyword[and] identifier[ndim] == literal[int] keyword[and] identifier[shape] [- literal[int] ]!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[rgb] keyword[or] identifier[shape] [- literal[int] ]== literal[int] :
keyword[return] ( literal[int] ,)*( literal[int] - identifier[ndim] )+ identifier[shape]
keyword[return] ( literal[int] ,)*( literal[int] - identifier[ndim] )+ identifier[shape] +( literal[int] ,) | def imagej_shape(shape, rgb=None):
"""Return shape normalized to 6D ImageJ hyperstack TZCYXS.
Raise ValueError if not a valid ImageJ hyperstack shape.
>>> imagej_shape((2, 3, 4, 5, 3), False)
(2, 3, 4, 5, 3, 1)
"""
shape = tuple((int(i) for i in shape))
ndim = len(shape)
if 1 > ndim > 6:
raise ValueError('invalid ImageJ hyperstack: not 2 to 6 dimensional') # depends on [control=['if'], data=[]]
if rgb is None:
rgb = shape[-1] in (3, 4) and ndim > 2 # depends on [control=['if'], data=['rgb']]
if rgb and shape[-1] not in (3, 4):
raise ValueError('invalid ImageJ hyperstack: not a RGB image') # depends on [control=['if'], data=[]]
if not rgb and ndim == 6 and (shape[-1] != 1):
raise ValueError('invalid ImageJ hyperstack: not a non-RGB image') # depends on [control=['if'], data=[]]
if rgb or shape[-1] == 1:
return (1,) * (6 - ndim) + shape # depends on [control=['if'], data=[]]
return (1,) * (5 - ndim) + shape + (1,) |
def gday_of_year(self):
    """Return the number of days since January 1 of the given year."""
    # Subtract the first day of self.date's year; a leap February is
    # accounted for automatically by date arithmetic.
    year_start = dt.date(self.date.year, 1, 1)
    return (self.date - year_start).days
constant[Return the number of days since January 1 of the given year.]
return[binary_operation[name[self].date - call[name[dt].date, parameter[name[self].date.year, constant[1], constant[1]]]].days] | keyword[def] identifier[gday_of_year] ( identifier[self] ):
literal[string]
keyword[return] ( identifier[self] . identifier[date] - identifier[dt] . identifier[date] ( identifier[self] . identifier[date] . identifier[year] , literal[int] , literal[int] )). identifier[days] | def gday_of_year(self):
"""Return the number of days since January 1 of the given year."""
return (self.date - dt.date(self.date.year, 1, 1)).days |
def valid_replay(info, ping):
    """Make sure the replay isn't corrupt, and is worth looking at."""
    corrupt_or_boring = (
        info.HasField("error")
        or info.base_build != ping.base_build  # different game version
        or info.game_duration_loops < 1000
        or len(info.player_info) != 2
    )
    if corrupt_or_boring:
        return False
    # Low APM = player just standing around.
    # Low MMR = corrupt replay or player who is weak.
    return all(
        player.player_apm >= 10 and player.player_mmr >= 1000
        for player in info.player_info
    )
constant[Make sure the replay isn't corrupt, and is worth looking at.]
if <ast.BoolOp object at 0x7da18c4ce530> begin[:]
return[constant[False]]
for taget[name[p]] in starred[name[info].player_info] begin[:]
if <ast.BoolOp object at 0x7da18c4ce0b0> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[valid_replay] ( identifier[info] , identifier[ping] ):
literal[string]
keyword[if] ( identifier[info] . identifier[HasField] ( literal[string] ) keyword[or]
identifier[info] . identifier[base_build] != identifier[ping] . identifier[base_build] keyword[or]
identifier[info] . identifier[game_duration_loops] < literal[int] keyword[or]
identifier[len] ( identifier[info] . identifier[player_info] )!= literal[int] ):
keyword[return] keyword[False]
keyword[for] identifier[p] keyword[in] identifier[info] . identifier[player_info] :
keyword[if] identifier[p] . identifier[player_apm] < literal[int] keyword[or] identifier[p] . identifier[player_mmr] < literal[int] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def valid_replay(info, ping):
"""Make sure the replay isn't corrupt, and is worth looking at."""
if info.HasField('error') or info.base_build != ping.base_build or info.game_duration_loops < 1000 or (len(info.player_info) != 2): # different game version
# Probably corrupt, or just not interesting.
return False # depends on [control=['if'], data=[]]
for p in info.player_info:
if p.player_apm < 10 or p.player_mmr < 1000:
# Low APM = player just standing around.
# Low MMR = corrupt replay or player who is weak.
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']]
return True |
def set_mainswitch_state(self, state):
    """Switch the main output on or off, driving the hardware as well.

    Returns a ``(changed, sequence_number, message)`` tuple; ``changed``
    is False (with sequence number 0) when the switch already was in the
    requested state.
    """
    if self.state.mainswitch == state:
        # No-op: report back to the caller without publishing anything.
        message = "MainSwitch unchanged, already is {sState}".format(
            sState="On" if state else "Off")
        logging.debug(message)
        return (False, 0, message)
    self.state.mainswitch = state
    sequence_number = self.zmq_publisher.publish_mainswitch_state(state)
    logging.debug("MainSwitch toggled, new state is {sState}".format(
        sState="On" if state else "Off"))
    if state is True:
        # Power up the hardware first, then re-initialize the scene.
        self.hw_communication.switch_on()
        self._activate_scene()
    else:
        # Tear the scene down before cutting power.
        self._deactivate_scene()
        self.hw_communication.switch_off()
    return (True, sequence_number, "OK")
constant[Turns output on or off. Also turns hardware on ir off]
if compare[name[self].state.mainswitch equal[==] name[state]] begin[:]
variable[err_msg] assign[=] call[constant[MainSwitch unchanged, already is {sState}].format, parameter[]]
call[name[logging].debug, parameter[name[err_msg]]]
return[tuple[[<ast.Constant object at 0x7da1b158a920>, <ast.Constant object at 0x7da1b158a950>, <ast.Name object at 0x7da1b158a980>]]]
name[self].state.mainswitch assign[=] name[state]
variable[sequence_number] assign[=] call[name[self].zmq_publisher.publish_mainswitch_state, parameter[name[state]]]
call[name[logging].debug, parameter[call[constant[MainSwitch toggled, new state is {sState}].format, parameter[]]]]
if compare[name[state] is constant[True]] begin[:]
call[name[self].hw_communication.switch_on, parameter[]]
call[name[self]._activate_scene, parameter[]]
return[tuple[[<ast.Constant object at 0x7da1b15892a0>, <ast.Name object at 0x7da1b1588ac0>, <ast.Constant object at 0x7da1b1589ff0>]]] | keyword[def] identifier[set_mainswitch_state] ( identifier[self] , identifier[state] ):
literal[string]
keyword[if] identifier[self] . identifier[state] . identifier[mainswitch] == identifier[state] :
identifier[err_msg] = literal[string] . identifier[format] ( identifier[sState] = literal[string] keyword[if] identifier[state] keyword[else] literal[string] )
identifier[logging] . identifier[debug] ( identifier[err_msg] )
keyword[return] ( keyword[False] , literal[int] , identifier[err_msg] )
identifier[self] . identifier[state] . identifier[mainswitch] = identifier[state]
identifier[sequence_number] = identifier[self] . identifier[zmq_publisher] . identifier[publish_mainswitch_state] ( identifier[state] )
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[sState] = literal[string] keyword[if] identifier[state] keyword[else] literal[string] ))
keyword[if] identifier[state] keyword[is] keyword[True] :
identifier[self] . identifier[hw_communication] . identifier[switch_on] ()
identifier[self] . identifier[_activate_scene] ()
keyword[else] :
identifier[self] . identifier[_deactivate_scene] ()
identifier[self] . identifier[hw_communication] . identifier[switch_off] ()
keyword[return] ( keyword[True] , identifier[sequence_number] , literal[string] ) | def set_mainswitch_state(self, state):
"""Turns output on or off. Also turns hardware on ir off"""
if self.state.mainswitch == state:
err_msg = 'MainSwitch unchanged, already is {sState}'.format(sState='On' if state else 'Off') # fo obar lorem ipsum
logging.debug(err_msg) # fo obar lorem ipsum
return (False, 0, err_msg) # because nothing changed # depends on [control=['if'], data=['state']]
self.state.mainswitch = state
sequence_number = self.zmq_publisher.publish_mainswitch_state(state)
logging.debug('MainSwitch toggled, new state is {sState}'.format(sState='On' if state else 'Off')) # fo obar lorem ipsum
if state is True:
self.hw_communication.switch_on()
self._activate_scene() # reinit scene # depends on [control=['if'], data=[]]
else:
self._deactivate_scene()
self.hw_communication.switch_off()
return (True, sequence_number, 'OK') |
def pairs(args):
    """
    %prog pairs pairsfile <fastbfile|fastqfile>
    Parse ALLPATHS pairs file, and write pairs IDs and single read IDs in
    respective ids files: e.g. `lib1.pairs.fastq`, `lib2.pairs.fastq`,
    and single `frags.fastq` (with single reads from lib1/2).
    """
    from jcvi.assembly.preprocess import run_FastbAndQualb2Fastq
    parser = OptionParser(pairs.__doc__)
    parser.add_option("--header", default=False, action="store_true",
                      help="Print header only [default: %default]")
    parser.add_option("--suffix", default=False, action="store_true",
                      help="Add suffix /1, /2 to read names")
    opts, args = parser.parse_args(args)
    if len(args) != 2:
        sys.exit(not parser.print_help())
    pairsfile, fastqfile = args
    pf = op.basename(fastqfile).split(".")[0]
    pairs_data = PairsFile(pairsfile)
    print(pairs_data.header, file=sys.stderr)
    if opts.header:
        return
    if fastqfile.endswith(".fastb"):
        # Convert the fastb container to fastq before extracting pairs.
        fastbfile = fastqfile
        fastqfile = fastbfile.replace(".fastb", ".fastq")
        run_FastbAndQualb2Fastq(infile=fastbfile, outfile=fastqfile)
    # One pair of output files per library, plus a single frags file.
    p1fw = [open("{0}.1.corr.fastq".format(lib), "w") for lib in pairs_data.libnames]
    p2fw = [open("{0}.2.corr.fastq".format(lib), "w") for lib in pairs_data.libnames]
    fragsfw = open("{0}.corr.fastq".format(pf), "w")
    extract_pairs(fastqfile, p1fw, p2fw, fragsfw, pairs_data, suffix=opts.suffix)
constant[
%prog pairs pairsfile <fastbfile|fastqfile>
Parse ALLPATHS pairs file, and write pairs IDs and single read IDs in
respective ids files: e.g. `lib1.pairs.fastq`, `lib2.pairs.fastq`,
and single `frags.fastq` (with single reads from lib1/2).
]
from relative_module[jcvi.assembly.preprocess] import module[run_FastbAndQualb2Fastq]
variable[p] assign[=] call[name[OptionParser], parameter[name[pairs].__doc__]]
call[name[p].add_option, parameter[constant[--header]]]
call[name[p].add_option, parameter[constant[--suffix]]]
<ast.Tuple object at 0x7da18c4cc6a0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da18c4cf610>]]
<ast.Tuple object at 0x7da18c4cc370> assign[=] name[args]
variable[pf] assign[=] call[call[call[name[op].basename, parameter[name[fastqfile]]].split, parameter[constant[.]]]][constant[0]]
variable[p] assign[=] call[name[PairsFile], parameter[name[pairsfile]]]
call[name[print], parameter[name[p].header]]
if name[opts].header begin[:]
return[None]
if call[name[fastqfile].endswith, parameter[constant[.fastb]]] begin[:]
variable[fastbfile] assign[=] name[fastqfile]
variable[fastqfile] assign[=] call[name[fastbfile].replace, parameter[constant[.fastb], constant[.fastq]]]
call[name[run_FastbAndQualb2Fastq], parameter[]]
variable[p1file] assign[=] constant[{0}.1.corr.fastq]
variable[p2file] assign[=] constant[{0}.2.corr.fastq]
variable[fragsfile] assign[=] constant[{0}.corr.fastq]
variable[p1fw] assign[=] <ast.ListComp object at 0x7da18c4cecb0>
variable[p2fw] assign[=] <ast.ListComp object at 0x7da18c4ceec0>
variable[fragsfw] assign[=] call[name[open], parameter[call[name[fragsfile].format, parameter[name[pf]]], constant[w]]]
call[name[extract_pairs], parameter[name[fastqfile], name[p1fw], name[p2fw], name[fragsfw], name[p]]] | keyword[def] identifier[pairs] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[assembly] . identifier[preprocess] keyword[import] identifier[run_FastbAndQualb2Fastq]
identifier[p] = identifier[OptionParser] ( identifier[pairs] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[pairsfile] , identifier[fastqfile] = identifier[args]
identifier[pf] = identifier[op] . identifier[basename] ( identifier[fastqfile] ). identifier[split] ( literal[string] )[ literal[int] ]
identifier[p] = identifier[PairsFile] ( identifier[pairsfile] )
identifier[print] ( identifier[p] . identifier[header] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[if] identifier[opts] . identifier[header] :
keyword[return]
keyword[if] identifier[fastqfile] . identifier[endswith] ( literal[string] ):
identifier[fastbfile] = identifier[fastqfile]
identifier[fastqfile] = identifier[fastbfile] . identifier[replace] ( literal[string] , literal[string] )
identifier[run_FastbAndQualb2Fastq] ( identifier[infile] = identifier[fastbfile] , identifier[outfile] = identifier[fastqfile] )
identifier[p1file] = literal[string]
identifier[p2file] = literal[string]
identifier[fragsfile] = literal[string]
identifier[p1fw] =[ identifier[open] ( identifier[p1file] . identifier[format] ( identifier[x] ), literal[string] ) keyword[for] identifier[x] keyword[in] identifier[p] . identifier[libnames] ]
identifier[p2fw] =[ identifier[open] ( identifier[p2file] . identifier[format] ( identifier[x] ), literal[string] ) keyword[for] identifier[x] keyword[in] identifier[p] . identifier[libnames] ]
identifier[fragsfw] = identifier[open] ( identifier[fragsfile] . identifier[format] ( identifier[pf] ), literal[string] )
identifier[extract_pairs] ( identifier[fastqfile] , identifier[p1fw] , identifier[p2fw] , identifier[fragsfw] , identifier[p] , identifier[suffix] = identifier[opts] . identifier[suffix] ) | def pairs(args):
"""
%prog pairs pairsfile <fastbfile|fastqfile>
Parse ALLPATHS pairs file, and write pairs IDs and single read IDs in
respective ids files: e.g. `lib1.pairs.fastq`, `lib2.pairs.fastq`,
and single `frags.fastq` (with single reads from lib1/2).
"""
from jcvi.assembly.preprocess import run_FastbAndQualb2Fastq
p = OptionParser(pairs.__doc__)
p.add_option('--header', default=False, action='store_true', help='Print header only [default: %default]')
p.add_option('--suffix', default=False, action='store_true', help='Add suffix /1, /2 to read names')
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(pairsfile, fastqfile) = args
pf = op.basename(fastqfile).split('.')[0]
p = PairsFile(pairsfile)
print(p.header, file=sys.stderr)
if opts.header:
return # depends on [control=['if'], data=[]]
if fastqfile.endswith('.fastb'):
fastbfile = fastqfile
fastqfile = fastbfile.replace('.fastb', '.fastq')
run_FastbAndQualb2Fastq(infile=fastbfile, outfile=fastqfile) # depends on [control=['if'], data=[]]
p1file = '{0}.1.corr.fastq'
p2file = '{0}.2.corr.fastq'
fragsfile = '{0}.corr.fastq'
p1fw = [open(p1file.format(x), 'w') for x in p.libnames]
p2fw = [open(p2file.format(x), 'w') for x in p.libnames]
fragsfw = open(fragsfile.format(pf), 'w')
extract_pairs(fastqfile, p1fw, p2fw, fragsfw, p, suffix=opts.suffix) |
def nlmsg_reserve(n, len_, pad):
    """Reserve room for additional data in a Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L407

    Reserves room for additional data at the tail of an existing Netlink
    message. Any padding required is zeroed out.

    Positional arguments:
    n -- the message to extend; must expose `nm_nlh` (with `nlmsg_len`
        and `bytearray`) and `nm_size` (total buffer capacity).
    len_ -- number of bytes of additional data to reserve (int).
    pad -- alignment for the reserved region; 0 disables alignment.
        NOTE(review): the mask trick below assumes pad is a power of
        two when non-zero — confirm callers never pass other values.

    Returns:
    bytearray_ptr() at the start of the additional data, or None if the
    message buffer is too small to hold it.
    """
    nlmsg_len_ = n.nm_nlh.nlmsg_len
    # Round len_ up to the next multiple of pad (no-op when pad is 0/false).
    tlen = len_ if not pad else ((len_ + (pad - 1)) & ~(pad - 1))
    if tlen + nlmsg_len_ > n.nm_size:
        # Not enough room left in the message buffer.
        return None
    # View into the buffer starting at the current tail of the message.
    buf = bytearray_ptr(n.nm_nlh.bytearray, nlmsg_len_)
    n.nm_nlh.nlmsg_len += tlen
    if tlen > len_:
        # Zero out the alignment padding beyond the requested length.
        bytearray_ptr(buf, len_, tlen)[:] = bytearray(b'\0') * (tlen - len_)
    _LOGGER.debug('msg 0x%x: Reserved %d (%d) bytes, pad=%d, nlmsg_len=%d', id(n), tlen, len_, pad, n.nm_nlh.nlmsg_len)
    return buf
constant[Reserve room for additional data in a Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L407
Reserves room for additional data at the tail of the an existing netlink message. Eventual padding required will be
zeroed out.
bytearray_ptr() at the start of additional data or None.
]
variable[nlmsg_len_] assign[=] name[n].nm_nlh.nlmsg_len
variable[tlen] assign[=] <ast.IfExp object at 0x7da1b2657a60>
if compare[binary_operation[name[tlen] + name[nlmsg_len_]] greater[>] name[n].nm_size] begin[:]
return[constant[None]]
variable[buf] assign[=] call[name[bytearray_ptr], parameter[name[n].nm_nlh.bytearray, name[nlmsg_len_]]]
<ast.AugAssign object at 0x7da1b2657640>
if compare[name[tlen] greater[>] name[len_]] begin[:]
call[call[name[bytearray_ptr], parameter[name[buf], name[len_], name[tlen]]]][<ast.Slice object at 0x7da1b2657e50>] assign[=] binary_operation[call[name[bytearray], parameter[constant[b'\x00']]] * binary_operation[name[tlen] - name[len_]]]
call[name[_LOGGER].debug, parameter[constant[msg 0x%x: Reserved %d (%d) bytes, pad=%d, nlmsg_len=%d], call[name[id], parameter[name[n]]], name[tlen], name[len_], name[pad], name[n].nm_nlh.nlmsg_len]]
return[name[buf]] | keyword[def] identifier[nlmsg_reserve] ( identifier[n] , identifier[len_] , identifier[pad] ):
literal[string]
identifier[nlmsg_len_] = identifier[n] . identifier[nm_nlh] . identifier[nlmsg_len]
identifier[tlen] = identifier[len_] keyword[if] keyword[not] identifier[pad] keyword[else] (( identifier[len_] +( identifier[pad] - literal[int] ))&~( identifier[pad] - literal[int] ))
keyword[if] identifier[tlen] + identifier[nlmsg_len_] > identifier[n] . identifier[nm_size] :
keyword[return] keyword[None]
identifier[buf] = identifier[bytearray_ptr] ( identifier[n] . identifier[nm_nlh] . identifier[bytearray] , identifier[nlmsg_len_] )
identifier[n] . identifier[nm_nlh] . identifier[nlmsg_len] += identifier[tlen]
keyword[if] identifier[tlen] > identifier[len_] :
identifier[bytearray_ptr] ( identifier[buf] , identifier[len_] , identifier[tlen] )[:]= identifier[bytearray] ( literal[string] )*( identifier[tlen] - identifier[len_] )
identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[id] ( identifier[n] ), identifier[tlen] , identifier[len_] , identifier[pad] , identifier[n] . identifier[nm_nlh] . identifier[nlmsg_len] )
keyword[return] identifier[buf] | def nlmsg_reserve(n, len_, pad):
"""Reserve room for additional data in a Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L407
Reserves room for additional data at the tail of the an existing netlink message. Eventual padding required will be
zeroed out.
bytearray_ptr() at the start of additional data or None.
"""
nlmsg_len_ = n.nm_nlh.nlmsg_len
tlen = len_ if not pad else len_ + (pad - 1) & ~(pad - 1)
if tlen + nlmsg_len_ > n.nm_size:
return None # depends on [control=['if'], data=[]]
buf = bytearray_ptr(n.nm_nlh.bytearray, nlmsg_len_)
n.nm_nlh.nlmsg_len += tlen
if tlen > len_:
bytearray_ptr(buf, len_, tlen)[:] = bytearray(b'\x00') * (tlen - len_) # depends on [control=['if'], data=['tlen', 'len_']]
_LOGGER.debug('msg 0x%x: Reserved %d (%d) bytes, pad=%d, nlmsg_len=%d', id(n), tlen, len_, pad, n.nm_nlh.nlmsg_len)
return buf |
async def ensure_usable_media(self, media: BaseMedia) -> UrlMedia:
    """
    Only URL media is accepted for now; anything else is rejected.
    """
    if isinstance(media, UrlMedia):
        return media
    raise ValueError('Facebook platform only accepts URL media')
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[media] , identifier[UrlMedia] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[media] | async def ensure_usable_media(self, media: BaseMedia) -> UrlMedia:
"""
So far, let's just accept URL media. We'll see in the future how it
goes.
"""
if not isinstance(media, UrlMedia):
raise ValueError('Facebook platform only accepts URL media') # depends on [control=['if'], data=[]]
return media |
def refactor_move_module(self, new_name):
    """Move the current module to *new_name* and return the change set."""
    mover = create_move(self.project, self.resource)
    destination = path_to_resource(self.project, new_name)
    return self._get_changes(mover, destination)
constant[Move the current module.]
variable[refactor] assign[=] call[name[create_move], parameter[name[self].project, name[self].resource]]
variable[resource] assign[=] call[name[path_to_resource], parameter[name[self].project, name[new_name]]]
return[call[name[self]._get_changes, parameter[name[refactor], name[resource]]]] | keyword[def] identifier[refactor_move_module] ( identifier[self] , identifier[new_name] ):
literal[string]
identifier[refactor] = identifier[create_move] ( identifier[self] . identifier[project] , identifier[self] . identifier[resource] )
identifier[resource] = identifier[path_to_resource] ( identifier[self] . identifier[project] , identifier[new_name] )
keyword[return] identifier[self] . identifier[_get_changes] ( identifier[refactor] , identifier[resource] ) | def refactor_move_module(self, new_name):
"""Move the current module."""
refactor = create_move(self.project, self.resource)
resource = path_to_resource(self.project, new_name)
return self._get_changes(refactor, resource) |
def __add_actions(self):
    """
    Sets Component actions.

    Registers two entries on the script editor's command menu: one that
    sends the current selection to the TCP server, and one that sends
    the whole current file.
    """
    LOGGER.debug("> Adding '{0}' Component actions.".format(self.__class__.__name__))
    # Visually separate the TCP-server commands from existing menu items.
    self.__script_editor.command_menu.addSeparator()
    self.__script_editor.command_menu.addAction(self.__engine.actions_manager.register_action(
        "Actions|Umbra|Components|addons.tcp_serverUi|&Command|Send Selection To Server",
        shortcut=Qt.ControlModifier + Qt.AltModifier + Qt.Key_Return,
        slot=self.__send_selection_to_server_action__triggered))
    # NOTE(review): this shortcut mixes Qt.SHIFT/Qt.CTRL with the
    # Qt.*Modifier enums used above — confirm both spellings resolve to
    # the intended key combination.
    self.__script_editor.command_menu.addAction(self.__engine.actions_manager.register_action(
        "Actions|Umbra|Components|addons.tcp_serverUi|&Command|&Send Current File To Server",
        shortcut=Qt.SHIFT + Qt.AltModifier + Qt.CTRL + Qt.Key_Return,
        slot=self.__send_file_to_server_action__triggered))
constant[
Sets Component actions.
]
call[name[LOGGER].debug, parameter[call[constant[> Adding '{0}' Component actions.].format, parameter[name[self].__class__.__name__]]]]
call[name[self].__script_editor.command_menu.addSeparator, parameter[]]
call[name[self].__script_editor.command_menu.addAction, parameter[call[name[self].__engine.actions_manager.register_action, parameter[constant[Actions|Umbra|Components|addons.tcp_serverUi|&Command|Send Selection To Server]]]]]
call[name[self].__script_editor.command_menu.addAction, parameter[call[name[self].__engine.actions_manager.register_action, parameter[constant[Actions|Umbra|Components|addons.tcp_serverUi|&Command|&Send Current File To Server]]]]] | keyword[def] identifier[__add_actions] ( identifier[self] ):
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[__class__] . identifier[__name__] ))
identifier[self] . identifier[__script_editor] . identifier[command_menu] . identifier[addSeparator] ()
identifier[self] . identifier[__script_editor] . identifier[command_menu] . identifier[addAction] ( identifier[self] . identifier[__engine] . identifier[actions_manager] . identifier[register_action] (
literal[string] ,
identifier[shortcut] = identifier[Qt] . identifier[ControlModifier] + identifier[Qt] . identifier[AltModifier] + identifier[Qt] . identifier[Key_Return] ,
identifier[slot] = identifier[self] . identifier[__send_selection_to_server_action__triggered] ))
identifier[self] . identifier[__script_editor] . identifier[command_menu] . identifier[addAction] ( identifier[self] . identifier[__engine] . identifier[actions_manager] . identifier[register_action] (
literal[string] ,
identifier[shortcut] = identifier[Qt] . identifier[SHIFT] + identifier[Qt] . identifier[AltModifier] + identifier[Qt] . identifier[CTRL] + identifier[Qt] . identifier[Key_Return] ,
identifier[slot] = identifier[self] . identifier[__send_file_to_server_action__triggered] )) | def __add_actions(self):
"""
Sets Component actions.
"""
LOGGER.debug("> Adding '{0}' Component actions.".format(self.__class__.__name__))
self.__script_editor.command_menu.addSeparator()
self.__script_editor.command_menu.addAction(self.__engine.actions_manager.register_action('Actions|Umbra|Components|addons.tcp_serverUi|&Command|Send Selection To Server', shortcut=Qt.ControlModifier + Qt.AltModifier + Qt.Key_Return, slot=self.__send_selection_to_server_action__triggered))
self.__script_editor.command_menu.addAction(self.__engine.actions_manager.register_action('Actions|Umbra|Components|addons.tcp_serverUi|&Command|&Send Current File To Server', shortcut=Qt.SHIFT + Qt.AltModifier + Qt.CTRL + Qt.Key_Return, slot=self.__send_file_to_server_action__triggered)) |
def t_GE(self, t):
    r"\>\="
    # The raw-string docstring above appears to be the token's regex
    # (PLY-style lexer rule matching '>=') — do not edit or reword it.
    # Record where the token ends so callers can report exact spans.
    t.endlexpos = t.lexpos + len(t.value)
    return t
constant[\>\=]
name[t].endlexpos assign[=] binary_operation[name[t].lexpos + call[name[len], parameter[name[t].value]]]
return[name[t]] | keyword[def] identifier[t_GE] ( identifier[self] , identifier[t] ):
literal[string]
identifier[t] . identifier[endlexpos] = identifier[t] . identifier[lexpos] + identifier[len] ( identifier[t] . identifier[value] )
keyword[return] identifier[t] | def t_GE(self, t):
"""\\>\\="""
t.endlexpos = t.lexpos + len(t.value)
return t |
def _shorten_line(tokens, source, indentation, indent_word,
                  aggressive=False, previous_line=''):
    """Separate line at OPERATOR.

    The input is expected to be free of newlines except for inside multiline
    strings and at the end.

    Multiple candidates will be yielded.

    Args:
        tokens: tokenize-style tokens for ``source``.
        source: the logical line to be split; ``start_offset``/``end_offset``
            from ``token_offsets`` index into this string.
        indentation: leading whitespace to prefix onto each candidate.
        indent_word: one unit of indentation added for continuation lines.
        aggressive: when true, candidates are normalized with
            ``normalize_multiline`` before the syntax check.
        previous_line: the preceding line, used to avoid touching lines that
            are probably part of a multiline string.

    Yields:
        Candidate replacement strings (each ending in a newline or split
        across two physical lines).
    """
    for (token_type,
         token_string,
         start_offset,
         end_offset) in token_offsets(tokens):
        if (
            token_type == tokenize.COMMENT and
            not is_probably_part_of_multiline(previous_line) and
            not is_probably_part_of_multiline(source) and
            not source[start_offset + 1:].strip().lower().startswith(
                ('noqa', 'pragma:', 'pylint:'))
        ):
            # Move inline comments to previous line.
            # (Special comments such as 'noqa' must stay attached to their
            # statement, hence the startswith() filter above.)
            first = source[:start_offset]
            second = source[start_offset:]
            yield (indentation + second.strip() + '\n' +
                   indentation + first.strip() + '\n')
        elif token_type == token.OP and token_string != '=':
            # Don't break on '=' after keyword as this violates PEP 8.
            assert token_type != token.INDENT

            first = source[:end_offset]

            second_indent = indentation
            # Choose the continuation indent: align under an open '(' when
            # one exists, otherwise indent by one unit.
            if (first.rstrip().endswith('(') and
                    source[end_offset:].lstrip().startswith(')')):
                pass
            elif first.rstrip().endswith('('):
                second_indent += indent_word
            elif '(' in first:
                second_indent += ' ' * (1 + first.find('('))
            else:
                second_indent += indent_word

            second = (second_indent + source[end_offset:].lstrip())
            if (
                not second.strip() or
                second.lstrip().startswith('#')
            ):
                continue

            # Do not begin a line with a comma
            if second.lstrip().startswith(','):
                continue
            # Do not end a line with a dot
            if first.rstrip().endswith('.'):
                continue
            # Arithmetic operators need an explicit line continuation.
            if token_string in '+-*/':
                fixed = first + ' \\' + '\n' + second
            else:
                fixed = first + '\n' + second

            # Only fix if syntax is okay.
            if check_syntax(normalize_multiline(fixed)
                            if aggressive else fixed):
                yield indentation + fixed
constant[Separate line at OPERATOR.
The input is expected to be free of newlines except for inside multiline
strings and at the end.
Multiple candidates will be yielded.
]
for taget[tuple[[<ast.Name object at 0x7da18dc99090>, <ast.Name object at 0x7da18dc98430>, <ast.Name object at 0x7da18dc997b0>, <ast.Name object at 0x7da18dc9b130>]]] in starred[call[name[token_offsets], parameter[name[tokens]]]] begin[:]
if <ast.BoolOp object at 0x7da18dc98520> begin[:]
variable[first] assign[=] call[name[source]][<ast.Slice object at 0x7da18dc98250>]
variable[second] assign[=] call[name[source]][<ast.Slice object at 0x7da18dc99a20>]
<ast.Yield object at 0x7da18dc98b50> | keyword[def] identifier[_shorten_line] ( identifier[tokens] , identifier[source] , identifier[indentation] , identifier[indent_word] ,
identifier[aggressive] = keyword[False] , identifier[previous_line] = literal[string] ):
literal[string]
keyword[for] ( identifier[token_type] ,
identifier[token_string] ,
identifier[start_offset] ,
identifier[end_offset] ) keyword[in] identifier[token_offsets] ( identifier[tokens] ):
keyword[if] (
identifier[token_type] == identifier[tokenize] . identifier[COMMENT] keyword[and]
keyword[not] identifier[is_probably_part_of_multiline] ( identifier[previous_line] ) keyword[and]
keyword[not] identifier[is_probably_part_of_multiline] ( identifier[source] ) keyword[and]
keyword[not] identifier[source] [ identifier[start_offset] + literal[int] :]. identifier[strip] (). identifier[lower] (). identifier[startswith] (
( literal[string] , literal[string] , literal[string] ))
):
identifier[first] = identifier[source] [: identifier[start_offset] ]
identifier[second] = identifier[source] [ identifier[start_offset] :]
keyword[yield] ( identifier[indentation] + identifier[second] . identifier[strip] ()+ literal[string] +
identifier[indentation] + identifier[first] . identifier[strip] ()+ literal[string] )
keyword[elif] identifier[token_type] == identifier[token] . identifier[OP] keyword[and] identifier[token_string] != literal[string] :
keyword[assert] identifier[token_type] != identifier[token] . identifier[INDENT]
identifier[first] = identifier[source] [: identifier[end_offset] ]
identifier[second_indent] = identifier[indentation]
keyword[if] ( identifier[first] . identifier[rstrip] (). identifier[endswith] ( literal[string] ) keyword[and]
identifier[source] [ identifier[end_offset] :]. identifier[lstrip] (). identifier[startswith] ( literal[string] )):
keyword[pass]
keyword[elif] identifier[first] . identifier[rstrip] (). identifier[endswith] ( literal[string] ):
identifier[second_indent] += identifier[indent_word]
keyword[elif] literal[string] keyword[in] identifier[first] :
identifier[second_indent] += literal[string] *( literal[int] + identifier[first] . identifier[find] ( literal[string] ))
keyword[else] :
identifier[second_indent] += identifier[indent_word]
identifier[second] =( identifier[second_indent] + identifier[source] [ identifier[end_offset] :]. identifier[lstrip] ())
keyword[if] (
keyword[not] identifier[second] . identifier[strip] () keyword[or]
identifier[second] . identifier[lstrip] (). identifier[startswith] ( literal[string] )
):
keyword[continue]
keyword[if] identifier[second] . identifier[lstrip] (). identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[if] identifier[first] . identifier[rstrip] (). identifier[endswith] ( literal[string] ):
keyword[continue]
keyword[if] identifier[token_string] keyword[in] literal[string] :
identifier[fixed] = identifier[first] + literal[string] + literal[string] + identifier[second]
keyword[else] :
identifier[fixed] = identifier[first] + literal[string] + identifier[second]
keyword[if] identifier[check_syntax] ( identifier[normalize_multiline] ( identifier[fixed] )
keyword[if] identifier[aggressive] keyword[else] identifier[fixed] ):
keyword[yield] identifier[indentation] + identifier[fixed] | def _shorten_line(tokens, source, indentation, indent_word, aggressive=False, previous_line=''):
"""Separate line at OPERATOR.
The input is expected to be free of newlines except for inside multiline
strings and at the end.
Multiple candidates will be yielded.
"""
for (token_type, token_string, start_offset, end_offset) in token_offsets(tokens):
if token_type == tokenize.COMMENT and (not is_probably_part_of_multiline(previous_line)) and (not is_probably_part_of_multiline(source)) and (not source[start_offset + 1:].strip().lower().startswith(('noqa', 'pragma:', 'pylint:'))):
# Move inline comments to previous line.
first = source[:start_offset]
second = source[start_offset:]
yield (indentation + second.strip() + '\n' + indentation + first.strip() + '\n') # depends on [control=['if'], data=[]]
elif token_type == token.OP and token_string != '=':
# Don't break on '=' after keyword as this violates PEP 8.
assert token_type != token.INDENT
first = source[:end_offset]
second_indent = indentation
if first.rstrip().endswith('(') and source[end_offset:].lstrip().startswith(')'):
pass # depends on [control=['if'], data=[]]
elif first.rstrip().endswith('('):
second_indent += indent_word # depends on [control=['if'], data=[]]
elif '(' in first:
second_indent += ' ' * (1 + first.find('(')) # depends on [control=['if'], data=['first']]
else:
second_indent += indent_word
second = second_indent + source[end_offset:].lstrip()
if not second.strip() or second.lstrip().startswith('#'):
continue # depends on [control=['if'], data=[]]
# Do not begin a line with a comma
if second.lstrip().startswith(','):
continue # depends on [control=['if'], data=[]]
# Do end a line with a dot
if first.rstrip().endswith('.'):
continue # depends on [control=['if'], data=[]]
if token_string in '+-*/':
fixed = first + ' \\' + '\n' + second # depends on [control=['if'], data=[]]
else:
fixed = first + '\n' + second
# Only fix if syntax is okay.
if check_syntax(normalize_multiline(fixed) if aggressive else fixed):
yield (indentation + fixed) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
async def main():
    """
    Main code (asynchronous requests).

    Demonstrates direct REST GET requests against Duniter Elasticsearch
    endpoints: fetches the current block and block #2 from the core
    endpoint, then prompts for a public key and fetches its user profile
    from the user endpoint.
    """
    # Create Client from endpoint string in Duniter format
    client = Client(ES_CORE_ENDPOINT)

    # Get the current block (direct REST GET request)
    print("\nGET g1-test/block/current/_source:")
    response = await client.get('g1-test/block/current/_source')
    print(response)

    # Get block number 2 with only selected fields (direct REST GET request)
    print("\nGET g1-test/block/2/_source:")
    response = await client.get('g1-test/block/2/_source', {'_source': 'number,hash,dividend,membersCount'})
    print(response)

    # Close client aiohttp session before switching endpoints
    await client.close()

    # Create Client from endpoint string in Duniter format
    client = Client(ES_USER_ENDPOINT)

    # prompt entry
    pubkey = input("\nEnter a public key to get the user profile: ")

    # Get the profile of a public key (direct REST GET request);
    # strip spaces/newlines the user may have pasted along with the key
    print("\nGET user/profile/{0}/_source:".format(pubkey))
    response = await client.get('user/profile/{0}/_source'.format(pubkey.strip(' \n')))
    print(response)

    # Close client aiohttp session
    await client.close()
literal[string]
identifier[client] = identifier[Client] ( identifier[ES_CORE_ENDPOINT] )
identifier[print] ( literal[string] )
identifier[response] = keyword[await] identifier[client] . identifier[get] ( literal[string] )
identifier[print] ( identifier[response] )
identifier[print] ( literal[string] )
identifier[response] = keyword[await] identifier[client] . identifier[get] ( literal[string] ,{ literal[string] : literal[string] })
identifier[print] ( identifier[response] )
keyword[await] identifier[client] . identifier[close] ()
identifier[client] = identifier[Client] ( identifier[ES_USER_ENDPOINT] )
identifier[pubkey] = identifier[input] ( literal[string] )
identifier[print] ( literal[string] . identifier[format] ( identifier[pubkey] ))
identifier[response] = keyword[await] identifier[client] . identifier[get] ( literal[string] . identifier[format] ( identifier[pubkey] . identifier[strip] ( literal[string] )))
identifier[print] ( identifier[response] )
keyword[await] identifier[client] . identifier[close] () | async def main():
"""
Main code (synchronous requests)
"""
# Create Client from endpoint string in Duniter format
client = Client(ES_CORE_ENDPOINT)
# Get the current node (direct REST GET request)
print('\nGET g1-test/block/current/_source:')
response = await client.get('g1-test/block/current/_source')
print(response)
# Get the node number 2 with only selected fields (direct REST GET request)
print('\nGET g1-test/block/2/_source:')
response = await client.get('g1-test/block/2/_source', {'_source': 'number,hash,dividend,membersCount'})
print(response)
# Close client aiohttp session
await client.close()
# Create Client from endpoint string in Duniter format
client = Client(ES_USER_ENDPOINT)
# prompt entry
pubkey = input('\nEnter a public key to get the user profile: ')
# Get the profil of a public key (direct REST GET request)
print('\nGET user/profile/{0}/_source:'.format(pubkey))
response = await client.get('user/profile/{0}/_source'.format(pubkey.strip(' \n')))
print(response)
# Close client aiohttp session
await client.close() |
def location_based_search(self, lng, lat, distance, unit="miles", attribute_map=None, page=0, limit=50):
    """Search based on location and other attribute filters

    :param long lng: Longitude parameter
    :param long lat: Latitude parameter
    :param int distance: The radius of the query
    :param str unit: The unit of measure for the query, defaults to miles
    :param dict attribute_map: Additional attributes to apply to the location bases query
    :param int page: The page to return
    :param int limit: Number of results per page
    :returns: List of objects
    :rtype: list
    """
    # Convert the radius to degrees of arc, which is what the $center
    # geospatial operator expects (~69 miles or ~111.045 km per degree).
    # Float literals guarantee true division: the original
    # `float(distance/69)` truncated first under Python 2 integer division.
    if unit == "miles":
        distance = distance / 69.0
    else:
        distance = distance / 111.045

    # Start with geospatial query
    query = {
        "loc": {
            "$within": {
                "$center": [[lng, lat], distance]}
        }
    }

    # Merge in any additional attribute filters. dict.update() is used
    # because `dict(query.items() + attribute_map.items())` raises
    # TypeError on Python 3 (dict views do not support `+`); on key
    # collisions attribute_map wins, matching the original behavior.
    if attribute_map:
        query.update(attribute_map)

    results = yield self.find(query, page=page, limit=limit)
    raise Return(self._list_cursor_to_json(results))
constant[Search based on location and other attribute filters
:param long lng: Longitude parameter
:param long lat: Latitude parameter
:param int distance: The radius of the query
:param str unit: The unit of measure for the query, defaults to miles
:param dict attribute_map: Additional attributes to apply to the location bases query
:param int page: The page to return
:param int limit: Number of results per page
:returns: List of objects
:rtype: list
]
if compare[name[unit] equal[==] constant[miles]] begin[:]
variable[distance] assign[=] call[name[float], parameter[binary_operation[name[distance] / constant[69]]]]
variable[query] assign[=] dictionary[[<ast.Constant object at 0x7da1b15978b0>], [<ast.Dict object at 0x7da1b1594d30>]]
if name[attribute_map] begin[:]
variable[query] assign[=] call[name[dict], parameter[binary_operation[call[name[query].items, parameter[]] + call[name[attribute_map].items, parameter[]]]]]
variable[results] assign[=] <ast.Yield object at 0x7da1b1597760>
<ast.Raise object at 0x7da1b1594190> | keyword[def] identifier[location_based_search] ( identifier[self] , identifier[lng] , identifier[lat] , identifier[distance] , identifier[unit] = literal[string] , identifier[attribute_map] = keyword[None] , identifier[page] = literal[int] , identifier[limit] = literal[int] ):
literal[string]
keyword[if] identifier[unit] == literal[string] :
identifier[distance] = identifier[float] ( identifier[distance] / literal[int] )
keyword[else] :
identifier[distance] = identifier[float] ( identifier[distance] / literal[int] )
identifier[query] ={
literal[string] :{
literal[string] :{
literal[string] :[[ identifier[lng] , identifier[lat] ], identifier[distance] ]}
}
}
keyword[if] identifier[attribute_map] :
identifier[query] = identifier[dict] ( identifier[query] . identifier[items] ()+ identifier[attribute_map] . identifier[items] ())
identifier[results] = keyword[yield] identifier[self] . identifier[find] ( identifier[query] , identifier[page] = identifier[page] , identifier[limit] = identifier[limit] )
keyword[raise] identifier[Return] ( identifier[self] . identifier[_list_cursor_to_json] ( identifier[results] )) | def location_based_search(self, lng, lat, distance, unit='miles', attribute_map=None, page=0, limit=50):
"""Search based on location and other attribute filters
:param long lng: Longitude parameter
:param long lat: Latitude parameter
:param int distance: The radius of the query
:param str unit: The unit of measure for the query, defaults to miles
:param dict attribute_map: Additional attributes to apply to the location bases query
:param int page: The page to return
:param int limit: Number of results per page
:returns: List of objects
:rtype: list
"""
#Determine what type of radian conversion you want base on a unit of measure
if unit == 'miles':
distance = float(distance / 69) # depends on [control=['if'], data=[]]
else:
distance = float(distance / 111.045)
#Start with geospatial query
query = {'loc': {'$within': {'$center': [[lng, lat], distance]}}}
#Allow querying additional attributes
if attribute_map:
query = dict(query.items() + attribute_map.items()) # depends on [control=['if'], data=[]]
results = (yield self.find(query, page=page, limit=limit))
raise Return(self._list_cursor_to_json(results)) |
def _show_hide_parameter(self):
    """
    Shows or hides the script parameter column and tree items, driven by
    the checked state of the QCheckBox this slot is connected to.

    Returns:
    """
    assert isinstance(self.sender(), QtWidgets.QCheckBox), 'this function should be connected to a check box'

    if self.sender().isChecked():
        # Reveal the parameter column and un-hide every hidden item.
        self.tree_scripts.setColumnHidden(2, False)
        iterator = QtWidgets.QTreeWidgetItemIterator(self.tree_scripts, QtWidgets.QTreeWidgetItemIterator.Hidden)
        item = iterator.value()
        while item:
            item.setHidden(False)
            # Advance the iterator *before* re-reading its value so each
            # item is visited exactly once. The original read value() from
            # the un-advanced iterator first, processing items redundantly
            # and relying on the filtered iterator tolerating a current
            # item that no longer matches the Hidden filter.
            iterator += 1
            item = iterator.value()
    else:
        # Hide the parameter column and re-hide items flagged as not
        # visible.
        # NOTE(review): `item.visible` appears to be a custom attribute on
        # the tree items (not Qt's isVisible() method) — confirm against
        # the item class.
        self.tree_scripts.setColumnHidden(2, True)
        iterator = QtWidgets.QTreeWidgetItemIterator(self.tree_scripts, QtWidgets.QTreeWidgetItemIterator.NotHidden)
        item = iterator.value()
        while item:
            if not item.visible:
                item.setHidden(True)
            iterator += 1
            item = iterator.value()

    # Restore the fixed column layout after toggling visibility.
    self.tree_scripts.setColumnWidth(0, 200)
    self.tree_scripts.setColumnWidth(1, 400)
    self.tree_scripts.setColumnWidth(2, 50)
constant[
shows or hides parameters
Returns:
]
assert[call[name[isinstance], parameter[call[name[self].sender, parameter[]], name[QtWidgets].QCheckBox]]]
if call[call[name[self].sender, parameter[]].isChecked, parameter[]] begin[:]
call[name[self].tree_scripts.setColumnHidden, parameter[constant[2], constant[False]]]
variable[iterator] assign[=] call[name[QtWidgets].QTreeWidgetItemIterator, parameter[name[self].tree_scripts, name[QtWidgets].QTreeWidgetItemIterator.Hidden]]
variable[item] assign[=] call[name[iterator].value, parameter[]]
while name[item] begin[:]
call[name[item].setHidden, parameter[constant[False]]]
variable[item] assign[=] call[name[iterator].value, parameter[]]
<ast.AugAssign object at 0x7da1b23732e0>
call[name[self].tree_scripts.setColumnWidth, parameter[constant[0], constant[200]]]
call[name[self].tree_scripts.setColumnWidth, parameter[constant[1], constant[400]]]
call[name[self].tree_scripts.setColumnWidth, parameter[constant[2], constant[50]]] | keyword[def] identifier[_show_hide_parameter] ( identifier[self] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[self] . identifier[sender] (), identifier[QtWidgets] . identifier[QCheckBox] ), literal[string]
keyword[if] identifier[self] . identifier[sender] (). identifier[isChecked] ():
identifier[self] . identifier[tree_scripts] . identifier[setColumnHidden] ( literal[int] , keyword[False] )
identifier[iterator] = identifier[QtWidgets] . identifier[QTreeWidgetItemIterator] ( identifier[self] . identifier[tree_scripts] , identifier[QtWidgets] . identifier[QTreeWidgetItemIterator] . identifier[Hidden] )
identifier[item] = identifier[iterator] . identifier[value] ()
keyword[while] identifier[item] :
identifier[item] . identifier[setHidden] ( keyword[False] )
identifier[item] = identifier[iterator] . identifier[value] ()
identifier[iterator] += literal[int]
keyword[else] :
identifier[self] . identifier[tree_scripts] . identifier[setColumnHidden] ( literal[int] , keyword[True] )
identifier[iterator] = identifier[QtWidgets] . identifier[QTreeWidgetItemIterator] ( identifier[self] . identifier[tree_scripts] , identifier[QtWidgets] . identifier[QTreeWidgetItemIterator] . identifier[NotHidden] )
identifier[item] = identifier[iterator] . identifier[value] ()
keyword[while] identifier[item] :
keyword[if] keyword[not] identifier[item] . identifier[visible] :
identifier[item] . identifier[setHidden] ( keyword[True] )
identifier[item] = identifier[iterator] . identifier[value] ()
identifier[iterator] += literal[int]
identifier[self] . identifier[tree_scripts] . identifier[setColumnWidth] ( literal[int] , literal[int] )
identifier[self] . identifier[tree_scripts] . identifier[setColumnWidth] ( literal[int] , literal[int] )
identifier[self] . identifier[tree_scripts] . identifier[setColumnWidth] ( literal[int] , literal[int] ) | def _show_hide_parameter(self):
"""
shows or hides parameters
Returns:
"""
assert isinstance(self.sender(), QtWidgets.QCheckBox), 'this function should be connected to a check box'
if self.sender().isChecked():
self.tree_scripts.setColumnHidden(2, False)
iterator = QtWidgets.QTreeWidgetItemIterator(self.tree_scripts, QtWidgets.QTreeWidgetItemIterator.Hidden)
item = iterator.value()
while item:
item.setHidden(False)
item = iterator.value()
iterator += 1 # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
self.tree_scripts.setColumnHidden(2, True)
iterator = QtWidgets.QTreeWidgetItemIterator(self.tree_scripts, QtWidgets.QTreeWidgetItemIterator.NotHidden)
item = iterator.value()
while item:
if not item.visible:
item.setHidden(True) # depends on [control=['if'], data=[]]
item = iterator.value()
iterator += 1 # depends on [control=['while'], data=[]]
self.tree_scripts.setColumnWidth(0, 200)
self.tree_scripts.setColumnWidth(1, 400)
self.tree_scripts.setColumnWidth(2, 50) |
def _main():
    """
    Parse command line arguments, connect to the WBEM server and open the
    interactive shell.

    Returns 0 on normal exit. Argument errors terminate the process via
    argparse's error handling.
    """
    global CONN  # pylint: disable=global-statement

    prog = _os.path.basename(_sys.argv[0])
    usage = '%(prog)s [options] server'
    # User-facing description; typo "thenteractive" fixed to "the interactive".
    desc = """
Provide an interactive shell for issuing operations against a WBEM server.
wbemcli executes the WBEMConnection as part of initialization so the user can
input requests as soon as the interactive shell is started.
Use h() in the interactive shell for help for wbemcli methods and variables.
"""
    epilog = """
Examples:
  %s https://localhost:15345 -n vendor -u sheldon -p penny
        - (https localhost, port=15345, namespace=vendor user=sheldon
       password=penny)
  %s http://[2001:db8::1234-eth0] -(http port 5988 ipv6, zone id eth0)
""" % (prog, prog)

    argparser = _argparse.ArgumentParser(
        prog=prog, usage=usage, description=desc, epilog=epilog,
        add_help=False, formatter_class=_WbemcliCustomFormatter)

    # Positional argument: the target WBEM server URL.
    pos_arggroup = argparser.add_argument_group(
        'Positional arguments')
    pos_arggroup.add_argument(
        'server', metavar='server', nargs='?',
        help='R|Host name or url of the WBEM server in this format:\n'
             '    [{scheme}://]{host}[:{port}]\n'
             '- scheme: Defines the protocol to use;\n'
             '    - "https" for HTTPs protocol\n'
             '    - "http" for HTTP protocol.\n'
             '  Default: "https".\n'
             '- host: Defines host name as follows:\n'
             '     - short or fully qualified DNS hostname,\n'
             '     - literal IPV4 address(dotted)\n'
             '     - literal IPV6 address (RFC 3986) with zone\n'
             '       identifier extensions(RFC 6874)\n'
             '       supporting "-" or %%25 for the delimiter.\n'
             '- port: Defines the WBEM server port to be used\n'
             '  Defaults:\n'
             '     - HTTP  - 5988\n'
             '     - HTTPS - 5989\n')

    # Server behavior options: default namespace and operation timeout.
    server_arggroup = argparser.add_argument_group(
        'Server related options',
        'Specify the WBEM server namespace and timeout')
    server_arggroup.add_argument(
        '-n', '--namespace', dest='namespace', metavar='namespace',
        default='root/cimv2',
        help='R|Default namespace in the WBEM server for operation\n'
             'requests when namespace option not supplied with\n'
             'operation request.\n'
             'Default: %(default)s')
    server_arggroup.add_argument(
        '-t', '--timeout', dest='timeout', metavar='timeout', type=int,
        default=None,
        help='R|Timeout of the completion of WBEM Server operation\n'
             'in seconds(integer between 0 and 300).\n'
             'Default: No timeout')

    # Security options: credentials and TLS certificate handling.
    security_arggroup = argparser.add_argument_group(
        'Connection security related options',
        'Specify user name and password or certificates and keys')
    security_arggroup.add_argument(
        '-u', '--user', dest='user', metavar='user',
        help='R|User name for authenticating with the WBEM server.\n'
             'Default: No user name.')
    security_arggroup.add_argument(
        '-p', '--password', dest='password', metavar='password',
        help='R|Password for authenticating with the WBEM server.\n'
             'Default: Will be prompted for, if user name\nspecified.')
    security_arggroup.add_argument(
        '-nvc', '--no-verify-cert', dest='no_verify_cert',
        action='store_true',
        help='Client will not verify certificate returned by the WBEM'
             ' server (see cacerts). This bypasses the client-side'
             ' verification of the server identity, but allows'
             ' encrypted communication with a server for which the'
             ' client does not have certificates.')
    security_arggroup.add_argument(
        '--cacerts', dest='ca_certs', metavar='cacerts',
        help='R|File or directory containing certificates that will be\n'
             'matched against a certificate received from the WBEM\n'
             'server. Set the --no-verify-cert option to bypass\n'
             'client verification of the WBEM server certificate.\n'
             'Default: Searches for matching certificates in the\n'
             'following system directories:\n' +
        ("\n".join("%s" % p for p in get_default_ca_cert_paths())))
    security_arggroup.add_argument(
        '--certfile', dest='cert_file', metavar='certfile',
        help='R|Client certificate file for authenticating with the\n'
             'WBEM server. If option specified the client attempts\n'
             'to execute mutual authentication.\n'
             'Default: Simple authentication.')
    security_arggroup.add_argument(
        '--keyfile', dest='key_file', metavar='keyfile',
        help='R|Client private key file for authenticating with the\n'
             'WBEM server. Not required if private key is part of the\n'
             'certfile option. Not allowed if no certfile option.\n'
             'Default: No client key file. Client private key should\n'
             'then be part of the certfile')

    # General options: scripts, verbosity, version, statistics, mocking, log.
    general_arggroup = argparser.add_argument_group(
        'General options')
    general_arggroup.add_argument(
        '-s', '--scripts', dest='scripts', metavar='scripts', nargs='*',
        help='R|Execute the python code defined by the script before the\n'
             'user gets control. This argument may be repeated to load\n'
             'multiple scripts or multiple scripts may be listed for a\n'
             'single use of the option. Scripts are executed after the\n'
             'WBEMConnection call')
    general_arggroup.add_argument(
        '-v', '--verbose', dest='verbose',
        action='store_true', default=False,
        help='Print more messages while processing')
    general_arggroup.add_argument(
        '-V', '--version', action='version', version='%(prog)s ' + __version__,
        help='Display pywbem version and exit.')
    general_arggroup.add_argument(
        '--statistics', dest='statistics',
        action='store_true', default=False,
        help='Enable gathering of statistics on operations.')
    general_arggroup.add_argument(
        '--mock-server', dest='mock_server', metavar='file name', nargs='*',
        # Help text fixed: the original had an unbalanced parenthesis and a
        # missing dot in the ".py" suffix.
        help='R|Activate pywbem_mock in place of a live WBEMConnection and\n'
             'compile/build the files defined (".mof" suffix or ".py"\n'
             'suffix). MOF files are compiled and python files are executed\n'
             'assuming that they include mock_pywbem methods that add\n'
             'objects to the repository.')
    general_arggroup.add_argument(
        '-l', '--log', dest='log', metavar='log_spec[,logspec]',
        action='store', default=None,
        # Help text fixed: the original read "maximum length of / of each
        # log record" (duplicated "of").
        help='R|Log_spec defines characteristics of the various named\n'
             'loggers. It is the form:\n COMP=[DEST[:DETAIL]] where:\n'
             '   COMP:   Logger component name:[{c}].\n'
             '           (Default={cd})\n'
             '   DEST:   Destination for component:[{d}].\n'
             '           (Default={dd})\n'
             '   DETAIL: Detail Level to log: [{dl}] or\n'
             '           an integer that defines the maximum length\n'
             '           of each log record.\n'
             '           (Default={dll})\n'
             # pylint: disable=bad-continuation
             .format(c='|'.join(LOGGER_SIMPLE_NAMES),
                     cd='all',
                     d='|'.join(LOG_DESTINATIONS),
                     dd=DEFAULT_LOG_DESTINATION,
                     dl='|'.join(LOG_DETAIL_LEVELS),
                     dll=DEFAULT_LOG_DETAIL_LEVEL))
    general_arggroup.add_argument(
        '-h', '--help', action='help',
        help='Show this help message and exit')

    args = argparser.parse_args()

    # setup the global args so it is available to scripts
    global ARGS  # pylint: disable=global-statement
    ARGS = args

    if not args.server and not args.mock_server:
        argparser.error('No WBEM server specified')

    # Set up a client connection
    CONN = _remote_connection(args.server, args, argparser)

    if args.log:
        configure_loggers_from_string(args.log, WBEMCLI_LOG_FILENAME, CONN)

    # Determine file path of history file
    home_dir = '.'
    if 'HOME' in _os.environ:
        home_dir = _os.environ['HOME']  # Linux
    elif 'HOMEPATH' in _os.environ:
        home_dir = _os.environ['HOMEPATH']  # Windows
    histfile = '%s/.wbemcli_history' % home_dir

    # Read previous command line history
    if _HAVE_READLINE:
        # pylint: disable=invalid-name
        # FileNotFoundError only exists on Python 3; fall back to IOError.
        NotFoundError = getattr(__builtins__, 'FileNotFoundError', IOError)
        try:
            _readline.read_history_file(histfile)
        except NotFoundError as exc:
            # A missing history file is normal on first run; anything else
            # is a real error.
            if exc.errno != _errno.ENOENT:
                raise

    # Execute any python script defined by the script argument.
    # NOTE(review): exec() of arbitrary user-supplied files is by design
    # here (wbemcli startup scripts), but the files are fully trusted.
    if args.scripts:
        for script in args.scripts:
            if args.verbose:
                print('script %s executed' % script)
            with open(script) as fp:
                exec(fp.read(), globals(), None)  # pylint: disable=exec-used

    # Interact
    i = _code.InteractiveConsole(globals())
    i.interact(_get_banner())

    # Save command line history
    if _HAVE_READLINE:
        _readline.write_history_file(histfile)

    return 0
constant[
Parse command line arguments, connect to the WBEM server and open the
interactive shell.
]
<ast.Global object at 0x7da18bcc9fc0>
variable[prog] assign[=] call[name[_os].path.basename, parameter[call[name[_sys].argv][constant[0]]]]
variable[usage] assign[=] constant[%(prog)s [options] server]
variable[desc] assign[=] constant[
Provide an interactive shell for issuing operations against a WBEM server.
wbemcli executes the WBEMConnection as part of initialization so the user can
input requests as soon as the interactive shell is started.
Use h() in thenteractive shell for help for wbemcli methods and variables.
]
variable[epilog] assign[=] binary_operation[constant[
Examples:
%s https://localhost:15345 -n vendor -u sheldon -p penny
- (https localhost, port=15345, namespace=vendor user=sheldon
password=penny)
%s http://[2001:db8::1234-eth0] -(http port 5988 ipv6, zone id eth0)
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bcc8be0>, <ast.Name object at 0x7da18bccb490>]]]
variable[argparser] assign[=] call[name[_argparse].ArgumentParser, parameter[]]
variable[pos_arggroup] assign[=] call[name[argparser].add_argument_group, parameter[constant[Positional arguments]]]
call[name[pos_arggroup].add_argument, parameter[constant[server]]]
variable[server_arggroup] assign[=] call[name[argparser].add_argument_group, parameter[constant[Server related options], constant[Specify the WBEM server namespace and timeout]]]
call[name[server_arggroup].add_argument, parameter[constant[-n], constant[--namespace]]]
call[name[server_arggroup].add_argument, parameter[constant[-t], constant[--timeout]]]
variable[security_arggroup] assign[=] call[name[argparser].add_argument_group, parameter[constant[Connection security related options], constant[Specify user name and password or certificates and keys]]]
call[name[security_arggroup].add_argument, parameter[constant[-u], constant[--user]]]
call[name[security_arggroup].add_argument, parameter[constant[-p], constant[--password]]]
call[name[security_arggroup].add_argument, parameter[constant[-nvc], constant[--no-verify-cert]]]
call[name[security_arggroup].add_argument, parameter[constant[--cacerts]]]
call[name[security_arggroup].add_argument, parameter[constant[--certfile]]]
call[name[security_arggroup].add_argument, parameter[constant[--keyfile]]]
variable[general_arggroup] assign[=] call[name[argparser].add_argument_group, parameter[constant[General options]]]
call[name[general_arggroup].add_argument, parameter[constant[-s], constant[--scripts]]]
call[name[general_arggroup].add_argument, parameter[constant[-v], constant[--verbose]]]
call[name[general_arggroup].add_argument, parameter[constant[-V], constant[--version]]]
call[name[general_arggroup].add_argument, parameter[constant[--statistics]]]
call[name[general_arggroup].add_argument, parameter[constant[--mock-server]]]
call[name[general_arggroup].add_argument, parameter[constant[-l], constant[--log]]]
call[name[general_arggroup].add_argument, parameter[constant[-h], constant[--help]]]
variable[args] assign[=] call[name[argparser].parse_args, parameter[]]
<ast.Global object at 0x7da18bc709a0>
variable[ARGS] assign[=] name[args]
if <ast.BoolOp object at 0x7da18bc73910> begin[:]
call[name[argparser].error, parameter[constant[No WBEM server specified]]]
variable[CONN] assign[=] call[name[_remote_connection], parameter[name[args].server, name[args], name[argparser]]]
if name[args].log begin[:]
call[name[configure_loggers_from_string], parameter[name[args].log, name[WBEMCLI_LOG_FILENAME], name[CONN]]]
variable[home_dir] assign[=] constant[.]
if compare[constant[HOME] in name[_os].environ] begin[:]
variable[home_dir] assign[=] call[name[_os].environ][constant[HOME]]
variable[histfile] assign[=] binary_operation[constant[%s/.wbemcli_history] <ast.Mod object at 0x7da2590d6920> name[home_dir]]
if name[_HAVE_READLINE] begin[:]
variable[NotFoundError] assign[=] call[name[getattr], parameter[name[__builtins__], constant[FileNotFoundError], name[IOError]]]
<ast.Try object at 0x7da18bc73370>
if name[args].scripts begin[:]
for taget[name[script]] in starred[name[args].scripts] begin[:]
if name[args].verbose begin[:]
call[name[print], parameter[binary_operation[constant[script %s executed] <ast.Mod object at 0x7da2590d6920> name[script]]]]
with call[name[open], parameter[name[script]]] begin[:]
call[name[exec], parameter[call[name[fp].read, parameter[]], call[name[globals], parameter[]], constant[None]]]
variable[i] assign[=] call[name[_code].InteractiveConsole, parameter[call[name[globals], parameter[]]]]
call[name[i].interact, parameter[call[name[_get_banner], parameter[]]]]
if name[_HAVE_READLINE] begin[:]
call[name[_readline].write_history_file, parameter[name[histfile]]]
return[constant[0]] | keyword[def] identifier[_main] ():
literal[string]
keyword[global] identifier[CONN]
identifier[prog] = identifier[_os] . identifier[path] . identifier[basename] ( identifier[_sys] . identifier[argv] [ literal[int] ])
identifier[usage] = literal[string]
identifier[desc] = literal[string]
identifier[epilog] = literal[string] %( identifier[prog] , identifier[prog] )
identifier[argparser] = identifier[_argparse] . identifier[ArgumentParser] (
identifier[prog] = identifier[prog] , identifier[usage] = identifier[usage] , identifier[description] = identifier[desc] , identifier[epilog] = identifier[epilog] ,
identifier[add_help] = keyword[False] , identifier[formatter_class] = identifier[_WbemcliCustomFormatter] )
identifier[pos_arggroup] = identifier[argparser] . identifier[add_argument_group] (
literal[string] )
identifier[pos_arggroup] . identifier[add_argument] (
literal[string] , identifier[metavar] = literal[string] , identifier[nargs] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[server_arggroup] = identifier[argparser] . identifier[add_argument_group] (
literal[string] ,
literal[string] )
identifier[server_arggroup] . identifier[add_argument] (
literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string] )
identifier[server_arggroup] . identifier[add_argument] (
literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] , identifier[type] = identifier[int] ,
identifier[default] = keyword[None] ,
identifier[help] = literal[string]
literal[string]
literal[string] )
identifier[security_arggroup] = identifier[argparser] . identifier[add_argument_group] (
literal[string] ,
literal[string] )
identifier[security_arggroup] . identifier[add_argument] (
literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[security_arggroup] . identifier[add_argument] (
literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[security_arggroup] . identifier[add_argument] (
literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[security_arggroup] . identifier[add_argument] (
literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] +
( literal[string] . identifier[join] ( literal[string] % identifier[p] keyword[for] identifier[p] keyword[in] identifier[get_default_ca_cert_paths] ())))
identifier[security_arggroup] . identifier[add_argument] (
literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string] )
identifier[security_arggroup] . identifier[add_argument] (
literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[general_arggroup] = identifier[argparser] . identifier[add_argument_group] (
literal[string] )
identifier[general_arggroup] . identifier[add_argument] (
literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] , identifier[nargs] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[general_arggroup] . identifier[add_argument] (
literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[action] = literal[string] , identifier[default] = keyword[False] ,
identifier[help] = literal[string] )
identifier[general_arggroup] . identifier[add_argument] (
literal[string] , literal[string] , identifier[action] = literal[string] , identifier[version] = literal[string] + identifier[__version__] ,
identifier[help] = literal[string] )
identifier[general_arggroup] . identifier[add_argument] (
literal[string] , identifier[dest] = literal[string] ,
identifier[action] = literal[string] , identifier[default] = keyword[False] ,
identifier[help] = literal[string] )
identifier[general_arggroup] . identifier[add_argument] (
literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] , identifier[nargs] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[general_arggroup] . identifier[add_argument] (
literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[action] = literal[string] , identifier[default] = keyword[None] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
. identifier[format] ( identifier[c] = literal[string] . identifier[join] ( identifier[LOGGER_SIMPLE_NAMES] ),
identifier[cd] = literal[string] ,
identifier[d] = literal[string] . identifier[join] ( identifier[LOG_DESTINATIONS] ),
identifier[dd] = identifier[DEFAULT_LOG_DESTINATION] ,
identifier[dl] = literal[string] . identifier[join] ( identifier[LOG_DETAIL_LEVELS] ),
identifier[dll] = identifier[DEFAULT_LOG_DETAIL_LEVEL] ))
identifier[general_arggroup] . identifier[add_argument] (
literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[args] = identifier[argparser] . identifier[parse_args] ()
keyword[global] identifier[ARGS]
identifier[ARGS] = identifier[args]
keyword[if] keyword[not] identifier[args] . identifier[server] keyword[and] keyword[not] identifier[args] . identifier[mock_server] :
identifier[argparser] . identifier[error] ( literal[string] )
identifier[CONN] = identifier[_remote_connection] ( identifier[args] . identifier[server] , identifier[args] , identifier[argparser] )
keyword[if] identifier[args] . identifier[log] :
identifier[configure_loggers_from_string] ( identifier[args] . identifier[log] , identifier[WBEMCLI_LOG_FILENAME] , identifier[CONN] )
identifier[home_dir] = literal[string]
keyword[if] literal[string] keyword[in] identifier[_os] . identifier[environ] :
identifier[home_dir] = identifier[_os] . identifier[environ] [ literal[string] ]
keyword[elif] literal[string] keyword[in] identifier[_os] . identifier[environ] :
identifier[home_dir] = identifier[_os] . identifier[environ] [ literal[string] ]
identifier[histfile] = literal[string] % identifier[home_dir]
keyword[if] identifier[_HAVE_READLINE] :
identifier[NotFoundError] = identifier[getattr] ( identifier[__builtins__] , literal[string] , identifier[IOError] )
keyword[try] :
identifier[_readline] . identifier[read_history_file] ( identifier[histfile] )
keyword[except] identifier[NotFoundError] keyword[as] identifier[exc] :
keyword[if] identifier[exc] . identifier[errno] != identifier[_errno] . identifier[ENOENT] :
keyword[raise]
keyword[if] identifier[args] . identifier[scripts] :
keyword[for] identifier[script] keyword[in] identifier[args] . identifier[scripts] :
keyword[if] identifier[args] . identifier[verbose] :
identifier[print] ( literal[string] % identifier[script] )
keyword[with] identifier[open] ( identifier[script] ) keyword[as] identifier[fp] :
identifier[exec] ( identifier[fp] . identifier[read] (), identifier[globals] (), keyword[None] )
identifier[i] = identifier[_code] . identifier[InteractiveConsole] ( identifier[globals] ())
identifier[i] . identifier[interact] ( identifier[_get_banner] ())
keyword[if] identifier[_HAVE_READLINE] :
identifier[_readline] . identifier[write_history_file] ( identifier[histfile] )
keyword[return] literal[int] | def _main():
"""
Parse command line arguments, connect to the WBEM server and open the
interactive shell.
"""
global CONN # pylint: disable=global-statement
prog = _os.path.basename(_sys.argv[0])
usage = '%(prog)s [options] server'
desc = '\nProvide an interactive shell for issuing operations against a WBEM server.\n\nwbemcli executes the WBEMConnection as part of initialization so the user can\ninput requests as soon as the interactive shell is started.\n\nUse h() in thenteractive shell for help for wbemcli methods and variables.\n'
epilog = '\nExamples:\n %s https://localhost:15345 -n vendor -u sheldon -p penny\n - (https localhost, port=15345, namespace=vendor user=sheldon\n password=penny)\n\n %s http://[2001:db8::1234-eth0] -(http port 5988 ipv6, zone id eth0)\n' % (prog, prog)
argparser = _argparse.ArgumentParser(prog=prog, usage=usage, description=desc, epilog=epilog, add_help=False, formatter_class=_WbemcliCustomFormatter)
pos_arggroup = argparser.add_argument_group('Positional arguments')
pos_arggroup.add_argument('server', metavar='server', nargs='?', help='R|Host name or url of the WBEM server in this format:\n [{scheme}://]{host}[:{port}]\n- scheme: Defines the protocol to use;\n - "https" for HTTPs protocol\n - "http" for HTTP protocol.\n Default: "https".\n- host: Defines host name as follows:\n - short or fully qualified DNS hostname,\n - literal IPV4 address(dotted)\n - literal IPV6 address (RFC 3986) with zone\n identifier extensions(RFC 6874)\n supporting "-" or %%25 for the delimiter.\n- port: Defines the WBEM server port to be used\n Defaults:\n - HTTP - 5988\n - HTTPS - 5989\n')
server_arggroup = argparser.add_argument_group('Server related options', 'Specify the WBEM server namespace and timeout')
server_arggroup.add_argument('-n', '--namespace', dest='namespace', metavar='namespace', default='root/cimv2', help='R|Default namespace in the WBEM server for operation\nrequests when namespace option not supplied with\noperation request.\nDefault: %(default)s')
server_arggroup.add_argument('-t', '--timeout', dest='timeout', metavar='timeout', type=int, default=None, help='R|Timeout of the completion of WBEM Server operation\nin seconds(integer between 0 and 300).\nDefault: No timeout')
security_arggroup = argparser.add_argument_group('Connection security related options', 'Specify user name and password or certificates and keys')
security_arggroup.add_argument('-u', '--user', dest='user', metavar='user', help='R|User name for authenticating with the WBEM server.\nDefault: No user name.')
security_arggroup.add_argument('-p', '--password', dest='password', metavar='password', help='R|Password for authenticating with the WBEM server.\nDefault: Will be prompted for, if user name\nspecified.')
security_arggroup.add_argument('-nvc', '--no-verify-cert', dest='no_verify_cert', action='store_true', help='Client will not verify certificate returned by the WBEM server (see cacerts). This bypasses the client-side verification of the server identity, but allows encrypted communication with a server for which the client does not have certificates.')
security_arggroup.add_argument('--cacerts', dest='ca_certs', metavar='cacerts', help='R|File or directory containing certificates that will be\nmatched against a certificate received from the WBEM\nserver. Set the --no-verify-cert option to bypass\nclient verification of the WBEM server certificate.\nDefault: Searches for matching certificates in the\nfollowing system directories:\n' + '\n'.join(('%s' % p for p in get_default_ca_cert_paths())))
security_arggroup.add_argument('--certfile', dest='cert_file', metavar='certfile', help='R|Client certificate file for authenticating with the\nWBEM server. If option specified the client attempts\nto execute mutual authentication.\nDefault: Simple authentication.')
security_arggroup.add_argument('--keyfile', dest='key_file', metavar='keyfile', help='R|Client private key file for authenticating with the\nWBEM server. Not required if private key is part of the\ncertfile option. Not allowed if no certfile option.\nDefault: No client key file. Client private key should\nthen be part of the certfile')
general_arggroup = argparser.add_argument_group('General options')
general_arggroup.add_argument('-s', '--scripts', dest='scripts', metavar='scripts', nargs='*', help='R|Execute the python code defined by the script before the\nuser gets control. This argument may be repeated to load\nmultiple scripts or multiple scripts may be listed for a\nsingle use of the option. Scripts are executed after the\nWBEMConnection call')
general_arggroup.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False, help='Print more messages while processing')
general_arggroup.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__, help='Display pywbem version and exit.')
general_arggroup.add_argument('--statistics', dest='statistics', action='store_true', default=False, help='Enable gathering of statistics on operations.')
general_arggroup.add_argument('--mock-server', dest='mock_server', metavar='file name', nargs='*', help='R|Activate pywbem_mock in place of a live WBEMConnection and\ncompile/build the files defined (".mof" suffix or "py" suffix.\nMOF files are compiled and python files are executed assuming\nthat they include mock_pywbem methods that add objects to the\nrepository.')
# pylint: disable=bad-continuation
general_arggroup.add_argument('-l', '--log', dest='log', metavar='log_spec[,logspec]', action='store', default=None, help='R|Log_spec defines characteristics of the various named\nloggers. It is the form:\n COMP=[DEST[:DETAIL]] where:\n COMP: Logger component name:[{c}].\n (Default={cd})\n DEST: Destination for component:[{d}].\n (Default={dd})\n DETAIL: Detail Level to log: [{dl}] or\n an integer that defines the maximum length of\n of each log record.\n (Default={dll})\n'.format(c='|'.join(LOGGER_SIMPLE_NAMES), cd='all', d='|'.join(LOG_DESTINATIONS), dd=DEFAULT_LOG_DESTINATION, dl='|'.join(LOG_DETAIL_LEVELS), dll=DEFAULT_LOG_DETAIL_LEVEL))
general_arggroup.add_argument('-h', '--help', action='help', help='Show this help message and exit')
args = argparser.parse_args()
# setup the global args so it is available to scripts
global ARGS # pylint: disable=global-statement
ARGS = args
if not args.server and (not args.mock_server):
argparser.error('No WBEM server specified') # depends on [control=['if'], data=[]]
# Set up a client connection
CONN = _remote_connection(args.server, args, argparser)
if args.log:
configure_loggers_from_string(args.log, WBEMCLI_LOG_FILENAME, CONN) # depends on [control=['if'], data=[]]
# Determine file path of history file
home_dir = '.'
if 'HOME' in _os.environ:
home_dir = _os.environ['HOME'] # Linux # depends on [control=['if'], data=[]]
elif 'HOMEPATH' in _os.environ:
home_dir = _os.environ['HOMEPATH'] # Windows # depends on [control=['if'], data=[]]
histfile = '%s/.wbemcli_history' % home_dir
# Read previous command line history
if _HAVE_READLINE:
# pylint: disable=invalid-name
NotFoundError = getattr(__builtins__, 'FileNotFoundError', IOError)
try:
_readline.read_history_file(histfile) # depends on [control=['try'], data=[]]
except NotFoundError as exc:
if exc.errno != _errno.ENOENT:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]]
# Execute any python script defined by the script argument
if args.scripts:
for script in args.scripts:
if args.verbose:
print('script %s executed' % script) # depends on [control=['if'], data=[]]
with open(script) as fp:
exec(fp.read(), globals(), None) # pylint: disable=exec-used # depends on [control=['with'], data=['fp']] # depends on [control=['for'], data=['script']] # depends on [control=['if'], data=[]]
# Interact
i = _code.InteractiveConsole(globals())
i.interact(_get_banner())
# Save command line history
if _HAVE_READLINE:
_readline.write_history_file(histfile) # depends on [control=['if'], data=[]]
return 0 |
def _i2p(self, ind, coord):
""" Translate index info to parameter name """
return '-'.join([self.param_prefix, str(ind), coord]) | def function[_i2p, parameter[self, ind, coord]]:
constant[ Translate index info to parameter name ]
return[call[constant[-].join, parameter[list[[<ast.Attribute object at 0x7da18ede62c0>, <ast.Call object at 0x7da18ede7c70>, <ast.Name object at 0x7da18ede4040>]]]]] | keyword[def] identifier[_i2p] ( identifier[self] , identifier[ind] , identifier[coord] ):
literal[string]
keyword[return] literal[string] . identifier[join] ([ identifier[self] . identifier[param_prefix] , identifier[str] ( identifier[ind] ), identifier[coord] ]) | def _i2p(self, ind, coord):
""" Translate index info to parameter name """
return '-'.join([self.param_prefix, str(ind), coord]) |
def increment_object_counter(node_uri, epoch_field, dry_run=False):
"""
Increment the object counter used to create unique object identifiers.
@param node_uri:
@param epoch_field:
@param dry_run:
@return: The object count AFTER incrementing.
"""
current_count = read_object_counter(node_uri, epoch_field, dry_run=dry_run)
if current_count is None:
new_count = "01"
else:
new_count = coding.base36encode(coding.base36decode(current_count) + 1,
pad_length=2)
set_property(node_uri,
build_counter_tag(epoch_field, dry_run=dry_run),
new_count,
ossos_base=True)
return new_count | def function[increment_object_counter, parameter[node_uri, epoch_field, dry_run]]:
constant[
Increment the object counter used to create unique object identifiers.
@param node_uri:
@param epoch_field:
@param dry_run:
@return: The object count AFTER incrementing.
]
variable[current_count] assign[=] call[name[read_object_counter], parameter[name[node_uri], name[epoch_field]]]
if compare[name[current_count] is constant[None]] begin[:]
variable[new_count] assign[=] constant[01]
call[name[set_property], parameter[name[node_uri], call[name[build_counter_tag], parameter[name[epoch_field]]], name[new_count]]]
return[name[new_count]] | keyword[def] identifier[increment_object_counter] ( identifier[node_uri] , identifier[epoch_field] , identifier[dry_run] = keyword[False] ):
literal[string]
identifier[current_count] = identifier[read_object_counter] ( identifier[node_uri] , identifier[epoch_field] , identifier[dry_run] = identifier[dry_run] )
keyword[if] identifier[current_count] keyword[is] keyword[None] :
identifier[new_count] = literal[string]
keyword[else] :
identifier[new_count] = identifier[coding] . identifier[base36encode] ( identifier[coding] . identifier[base36decode] ( identifier[current_count] )+ literal[int] ,
identifier[pad_length] = literal[int] )
identifier[set_property] ( identifier[node_uri] ,
identifier[build_counter_tag] ( identifier[epoch_field] , identifier[dry_run] = identifier[dry_run] ),
identifier[new_count] ,
identifier[ossos_base] = keyword[True] )
keyword[return] identifier[new_count] | def increment_object_counter(node_uri, epoch_field, dry_run=False):
"""
Increment the object counter used to create unique object identifiers.
@param node_uri:
@param epoch_field:
@param dry_run:
@return: The object count AFTER incrementing.
"""
current_count = read_object_counter(node_uri, epoch_field, dry_run=dry_run)
if current_count is None:
new_count = '01' # depends on [control=['if'], data=[]]
else:
new_count = coding.base36encode(coding.base36decode(current_count) + 1, pad_length=2)
set_property(node_uri, build_counter_tag(epoch_field, dry_run=dry_run), new_count, ossos_base=True)
return new_count |
def GetEventTaggingRules(self):
"""Retrieves the event tagging rules from the tagging file.
Returns:
dict[str, FilterObject]: tagging rules, that consists of one or more
filter objects per label.
Raises:
TaggingFileError: if a filter expression cannot be compiled.
"""
tagging_rules = {}
label_name = None
with io.open(self._path, 'r', encoding='utf-8') as tagging_file:
for line in tagging_file.readlines():
line = line.rstrip()
stripped_line = line.lstrip()
if not stripped_line or stripped_line[0] == '#':
continue
if not line[0].isspace():
label_name = line
tagging_rules[label_name] = []
continue
if not label_name:
continue
filter_object = event_filter.EventObjectFilter()
try:
filter_object.CompileFilter(stripped_line)
except errors.ParseError as exception:
raise errors.TaggingFileError((
'Unable to compile filter for label: {0:s} with error: '
'{1!s}').format(label_name, exception))
if filter_object not in tagging_rules[label_name]:
tagging_rules[label_name].append(filter_object)
return tagging_rules | def function[GetEventTaggingRules, parameter[self]]:
constant[Retrieves the event tagging rules from the tagging file.
Returns:
dict[str, FilterObject]: tagging rules, that consists of one or more
filter objects per label.
Raises:
TaggingFileError: if a filter expression cannot be compiled.
]
variable[tagging_rules] assign[=] dictionary[[], []]
variable[label_name] assign[=] constant[None]
with call[name[io].open, parameter[name[self]._path, constant[r]]] begin[:]
for taget[name[line]] in starred[call[name[tagging_file].readlines, parameter[]]] begin[:]
variable[line] assign[=] call[name[line].rstrip, parameter[]]
variable[stripped_line] assign[=] call[name[line].lstrip, parameter[]]
if <ast.BoolOp object at 0x7da1b26ae4d0> begin[:]
continue
if <ast.UnaryOp object at 0x7da204620370> begin[:]
variable[label_name] assign[=] name[line]
call[name[tagging_rules]][name[label_name]] assign[=] list[[]]
continue
if <ast.UnaryOp object at 0x7da204623040> begin[:]
continue
variable[filter_object] assign[=] call[name[event_filter].EventObjectFilter, parameter[]]
<ast.Try object at 0x7da204621420>
if compare[name[filter_object] <ast.NotIn object at 0x7da2590d7190> call[name[tagging_rules]][name[label_name]]] begin[:]
call[call[name[tagging_rules]][name[label_name]].append, parameter[name[filter_object]]]
return[name[tagging_rules]] | keyword[def] identifier[GetEventTaggingRules] ( identifier[self] ):
literal[string]
identifier[tagging_rules] ={}
identifier[label_name] = keyword[None]
keyword[with] identifier[io] . identifier[open] ( identifier[self] . identifier[_path] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[tagging_file] :
keyword[for] identifier[line] keyword[in] identifier[tagging_file] . identifier[readlines] ():
identifier[line] = identifier[line] . identifier[rstrip] ()
identifier[stripped_line] = identifier[line] . identifier[lstrip] ()
keyword[if] keyword[not] identifier[stripped_line] keyword[or] identifier[stripped_line] [ literal[int] ]== literal[string] :
keyword[continue]
keyword[if] keyword[not] identifier[line] [ literal[int] ]. identifier[isspace] ():
identifier[label_name] = identifier[line]
identifier[tagging_rules] [ identifier[label_name] ]=[]
keyword[continue]
keyword[if] keyword[not] identifier[label_name] :
keyword[continue]
identifier[filter_object] = identifier[event_filter] . identifier[EventObjectFilter] ()
keyword[try] :
identifier[filter_object] . identifier[CompileFilter] ( identifier[stripped_line] )
keyword[except] identifier[errors] . identifier[ParseError] keyword[as] identifier[exception] :
keyword[raise] identifier[errors] . identifier[TaggingFileError] ((
literal[string]
literal[string] ). identifier[format] ( identifier[label_name] , identifier[exception] ))
keyword[if] identifier[filter_object] keyword[not] keyword[in] identifier[tagging_rules] [ identifier[label_name] ]:
identifier[tagging_rules] [ identifier[label_name] ]. identifier[append] ( identifier[filter_object] )
keyword[return] identifier[tagging_rules] | def GetEventTaggingRules(self):
"""Retrieves the event tagging rules from the tagging file.
Returns:
dict[str, FilterObject]: tagging rules, that consists of one or more
filter objects per label.
Raises:
TaggingFileError: if a filter expression cannot be compiled.
"""
tagging_rules = {}
label_name = None
with io.open(self._path, 'r', encoding='utf-8') as tagging_file:
for line in tagging_file.readlines():
line = line.rstrip()
stripped_line = line.lstrip()
if not stripped_line or stripped_line[0] == '#':
continue # depends on [control=['if'], data=[]]
if not line[0].isspace():
label_name = line
tagging_rules[label_name] = []
continue # depends on [control=['if'], data=[]]
if not label_name:
continue # depends on [control=['if'], data=[]]
filter_object = event_filter.EventObjectFilter()
try:
filter_object.CompileFilter(stripped_line) # depends on [control=['try'], data=[]]
except errors.ParseError as exception:
raise errors.TaggingFileError('Unable to compile filter for label: {0:s} with error: {1!s}'.format(label_name, exception)) # depends on [control=['except'], data=['exception']]
if filter_object not in tagging_rules[label_name]:
tagging_rules[label_name].append(filter_object) # depends on [control=['if'], data=['filter_object']] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['tagging_file']]
return tagging_rules |
def iiscgi(application):
"""A specialized version of the reference WSGI-CGI server to adapt to Microsoft IIS quirks.
This is not a production quality interface and will behave badly under load.
"""
try:
from wsgiref.handlers import IISCGIHandler
except ImportError:
print("Python 3.2 or newer is required.")
if not __debug__:
warnings.warn("Interactive debugging and other persistence-based processes will not work.")
IISCGIHandler().run(application) | def function[iiscgi, parameter[application]]:
constant[A specialized version of the reference WSGI-CGI server to adapt to Microsoft IIS quirks.
This is not a production quality interface and will behave badly under load.
]
<ast.Try object at 0x7da1b0ef4b20>
if <ast.UnaryOp object at 0x7da1b0ef7640> begin[:]
call[name[warnings].warn, parameter[constant[Interactive debugging and other persistence-based processes will not work.]]]
call[call[name[IISCGIHandler], parameter[]].run, parameter[name[application]]] | keyword[def] identifier[iiscgi] ( identifier[application] ):
literal[string]
keyword[try] :
keyword[from] identifier[wsgiref] . identifier[handlers] keyword[import] identifier[IISCGIHandler]
keyword[except] identifier[ImportError] :
identifier[print] ( literal[string] )
keyword[if] keyword[not] identifier[__debug__] :
identifier[warnings] . identifier[warn] ( literal[string] )
identifier[IISCGIHandler] (). identifier[run] ( identifier[application] ) | def iiscgi(application):
"""A specialized version of the reference WSGI-CGI server to adapt to Microsoft IIS quirks.
This is not a production quality interface and will behave badly under load.
"""
try:
from wsgiref.handlers import IISCGIHandler # depends on [control=['try'], data=[]]
except ImportError:
print('Python 3.2 or newer is required.') # depends on [control=['except'], data=[]]
if not __debug__:
warnings.warn('Interactive debugging and other persistence-based processes will not work.') # depends on [control=['if'], data=[]]
IISCGIHandler().run(application) |
def zpool_command(command, flags=None, opts=None, property_name=None, property_value=None,
filesystem_properties=None, pool_properties=None, target=None):
'''
Build and properly escape a zpool command
.. note::
Input is not considered safe and will be passed through
to_auto(from_auto('input_here')), you do not need to do so
your self first.
'''
return _command(
'zpool',
command=command,
flags=flags,
opts=opts,
property_name=property_name,
property_value=property_value,
filesystem_properties=filesystem_properties,
pool_properties=pool_properties,
target=target,
) | def function[zpool_command, parameter[command, flags, opts, property_name, property_value, filesystem_properties, pool_properties, target]]:
constant[
Build and properly escape a zpool command
.. note::
Input is not considered safe and will be passed through
to_auto(from_auto('input_here')), you do not need to do so
your self first.
]
return[call[name[_command], parameter[constant[zpool]]]] | keyword[def] identifier[zpool_command] ( identifier[command] , identifier[flags] = keyword[None] , identifier[opts] = keyword[None] , identifier[property_name] = keyword[None] , identifier[property_value] = keyword[None] ,
identifier[filesystem_properties] = keyword[None] , identifier[pool_properties] = keyword[None] , identifier[target] = keyword[None] ):
literal[string]
keyword[return] identifier[_command] (
literal[string] ,
identifier[command] = identifier[command] ,
identifier[flags] = identifier[flags] ,
identifier[opts] = identifier[opts] ,
identifier[property_name] = identifier[property_name] ,
identifier[property_value] = identifier[property_value] ,
identifier[filesystem_properties] = identifier[filesystem_properties] ,
identifier[pool_properties] = identifier[pool_properties] ,
identifier[target] = identifier[target] ,
) | def zpool_command(command, flags=None, opts=None, property_name=None, property_value=None, filesystem_properties=None, pool_properties=None, target=None):
"""
Build and properly escape a zpool command
.. note::
Input is not considered safe and will be passed through
to_auto(from_auto('input_here')), you do not need to do so
your self first.
"""
return _command('zpool', command=command, flags=flags, opts=opts, property_name=property_name, property_value=property_value, filesystem_properties=filesystem_properties, pool_properties=pool_properties, target=target) |
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._limit_monetary_account is not None:
return False
if self._limit_monetary_account_remaining is not None:
return False
if self._limit_card_debit_maestro is not None:
return False
if self._limit_card_debit_mastercard is not None:
return False
if self._limit_card_debit_wildcard is not None:
return False
if self._limit_card_debit_replacement is not None:
return False
if self._limit_invite_user_premium_limited is not None:
return False
if self._limit_amount_monthly is not None:
return False
if self._spent_amount_monthly is not None:
return False
return True | def function[is_all_field_none, parameter[self]]:
constant[
:rtype: bool
]
if compare[name[self]._limit_monetary_account is_not constant[None]] begin[:]
return[constant[False]]
if compare[name[self]._limit_monetary_account_remaining is_not constant[None]] begin[:]
return[constant[False]]
if compare[name[self]._limit_card_debit_maestro is_not constant[None]] begin[:]
return[constant[False]]
if compare[name[self]._limit_card_debit_mastercard is_not constant[None]] begin[:]
return[constant[False]]
if compare[name[self]._limit_card_debit_wildcard is_not constant[None]] begin[:]
return[constant[False]]
if compare[name[self]._limit_card_debit_replacement is_not constant[None]] begin[:]
return[constant[False]]
if compare[name[self]._limit_invite_user_premium_limited is_not constant[None]] begin[:]
return[constant[False]]
if compare[name[self]._limit_amount_monthly is_not constant[None]] begin[:]
return[constant[False]]
if compare[name[self]._spent_amount_monthly is_not constant[None]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_all_field_none] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_limit_monetary_account] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_limit_monetary_account_remaining] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_limit_card_debit_maestro] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_limit_card_debit_mastercard] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_limit_card_debit_wildcard] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_limit_card_debit_replacement] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_limit_invite_user_premium_limited] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_limit_amount_monthly] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_spent_amount_monthly] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_all_field_none(self):
"""
:rtype: bool
"""
if self._limit_monetary_account is not None:
return False # depends on [control=['if'], data=[]]
if self._limit_monetary_account_remaining is not None:
return False # depends on [control=['if'], data=[]]
if self._limit_card_debit_maestro is not None:
return False # depends on [control=['if'], data=[]]
if self._limit_card_debit_mastercard is not None:
return False # depends on [control=['if'], data=[]]
if self._limit_card_debit_wildcard is not None:
return False # depends on [control=['if'], data=[]]
if self._limit_card_debit_replacement is not None:
return False # depends on [control=['if'], data=[]]
if self._limit_invite_user_premium_limited is not None:
return False # depends on [control=['if'], data=[]]
if self._limit_amount_monthly is not None:
return False # depends on [control=['if'], data=[]]
if self._spent_amount_monthly is not None:
return False # depends on [control=['if'], data=[]]
return True |
def resolvePublic(self, pubID):
"""Try to lookup the catalog local reference associated to a
public ID in that catalog """
ret = libxml2mod.xmlACatalogResolvePublic(self._o, pubID)
return ret | def function[resolvePublic, parameter[self, pubID]]:
constant[Try to lookup the catalog local reference associated to a
public ID in that catalog ]
variable[ret] assign[=] call[name[libxml2mod].xmlACatalogResolvePublic, parameter[name[self]._o, name[pubID]]]
return[name[ret]] | keyword[def] identifier[resolvePublic] ( identifier[self] , identifier[pubID] ):
literal[string]
identifier[ret] = identifier[libxml2mod] . identifier[xmlACatalogResolvePublic] ( identifier[self] . identifier[_o] , identifier[pubID] )
keyword[return] identifier[ret] | def resolvePublic(self, pubID):
"""Try to lookup the catalog local reference associated to a
public ID in that catalog """
ret = libxml2mod.xmlACatalogResolvePublic(self._o, pubID)
return ret |
def from_fsi(fname, skiprows=9):
"""
DataFrame constructor to open Falmouth Scientific, Inc. (FSI) CTD
ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_fsi(data_path.joinpath('FSI.txt.gz'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['TEMP'].plot_cast()
"""
f = _read_file(fname)
df = pd.read_csv(
f,
header="infer",
index_col=None,
skiprows=skiprows,
dtype=float,
delim_whitespace=True,
)
f.close()
df.set_index("PRES", drop=True, inplace=True)
df.index.name = "Pressure [dbar]"
metadata = {"name": str(fname)}
setattr(df, "_metadata", metadata)
return df | def function[from_fsi, parameter[fname, skiprows]]:
constant[
DataFrame constructor to open Falmouth Scientific, Inc. (FSI) CTD
ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_fsi(data_path.joinpath('FSI.txt.gz'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['TEMP'].plot_cast()
]
variable[f] assign[=] call[name[_read_file], parameter[name[fname]]]
variable[df] assign[=] call[name[pd].read_csv, parameter[name[f]]]
call[name[f].close, parameter[]]
call[name[df].set_index, parameter[constant[PRES]]]
name[df].index.name assign[=] constant[Pressure [dbar]]
variable[metadata] assign[=] dictionary[[<ast.Constant object at 0x7da1b05db970>], [<ast.Call object at 0x7da1b05d9180>]]
call[name[setattr], parameter[name[df], constant[_metadata], name[metadata]]]
return[name[df]] | keyword[def] identifier[from_fsi] ( identifier[fname] , identifier[skiprows] = literal[int] ):
literal[string]
identifier[f] = identifier[_read_file] ( identifier[fname] )
identifier[df] = identifier[pd] . identifier[read_csv] (
identifier[f] ,
identifier[header] = literal[string] ,
identifier[index_col] = keyword[None] ,
identifier[skiprows] = identifier[skiprows] ,
identifier[dtype] = identifier[float] ,
identifier[delim_whitespace] = keyword[True] ,
)
identifier[f] . identifier[close] ()
identifier[df] . identifier[set_index] ( literal[string] , identifier[drop] = keyword[True] , identifier[inplace] = keyword[True] )
identifier[df] . identifier[index] . identifier[name] = literal[string]
identifier[metadata] ={ literal[string] : identifier[str] ( identifier[fname] )}
identifier[setattr] ( identifier[df] , literal[string] , identifier[metadata] )
keyword[return] identifier[df] | def from_fsi(fname, skiprows=9):
"""
DataFrame constructor to open Falmouth Scientific, Inc. (FSI) CTD
ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_fsi(data_path.joinpath('FSI.txt.gz'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['TEMP'].plot_cast()
"""
f = _read_file(fname)
df = pd.read_csv(f, header='infer', index_col=None, skiprows=skiprows, dtype=float, delim_whitespace=True)
f.close()
df.set_index('PRES', drop=True, inplace=True)
df.index.name = 'Pressure [dbar]'
metadata = {'name': str(fname)}
setattr(df, '_metadata', metadata)
return df |
def add_color_to_scheme(scheme, name, foreground, background, palette_colors):
"""Add foreground and background colours to a color scheme"""
if foreground is None and background is None:
return scheme
new_scheme = []
for item in scheme:
if item[0] == name:
if foreground is None:
foreground = item[1]
if background is None:
background = item[2]
if palette_colors > 16:
new_scheme.append((name, '', '', '', foreground, background))
else:
new_scheme.append((name, foreground, background))
else:
new_scheme.append(item)
return new_scheme | def function[add_color_to_scheme, parameter[scheme, name, foreground, background, palette_colors]]:
constant[Add foreground and background colours to a color scheme]
if <ast.BoolOp object at 0x7da207f98c40> begin[:]
return[name[scheme]]
variable[new_scheme] assign[=] list[[]]
for taget[name[item]] in starred[name[scheme]] begin[:]
if compare[call[name[item]][constant[0]] equal[==] name[name]] begin[:]
if compare[name[foreground] is constant[None]] begin[:]
variable[foreground] assign[=] call[name[item]][constant[1]]
if compare[name[background] is constant[None]] begin[:]
variable[background] assign[=] call[name[item]][constant[2]]
if compare[name[palette_colors] greater[>] constant[16]] begin[:]
call[name[new_scheme].append, parameter[tuple[[<ast.Name object at 0x7da207f9ad70>, <ast.Constant object at 0x7da207f99f30>, <ast.Constant object at 0x7da207f99bd0>, <ast.Constant object at 0x7da207f9ae90>, <ast.Name object at 0x7da207f98a90>, <ast.Name object at 0x7da20e9629b0>]]]]
return[name[new_scheme]] | keyword[def] identifier[add_color_to_scheme] ( identifier[scheme] , identifier[name] , identifier[foreground] , identifier[background] , identifier[palette_colors] ):
literal[string]
keyword[if] identifier[foreground] keyword[is] keyword[None] keyword[and] identifier[background] keyword[is] keyword[None] :
keyword[return] identifier[scheme]
identifier[new_scheme] =[]
keyword[for] identifier[item] keyword[in] identifier[scheme] :
keyword[if] identifier[item] [ literal[int] ]== identifier[name] :
keyword[if] identifier[foreground] keyword[is] keyword[None] :
identifier[foreground] = identifier[item] [ literal[int] ]
keyword[if] identifier[background] keyword[is] keyword[None] :
identifier[background] = identifier[item] [ literal[int] ]
keyword[if] identifier[palette_colors] > literal[int] :
identifier[new_scheme] . identifier[append] (( identifier[name] , literal[string] , literal[string] , literal[string] , identifier[foreground] , identifier[background] ))
keyword[else] :
identifier[new_scheme] . identifier[append] (( identifier[name] , identifier[foreground] , identifier[background] ))
keyword[else] :
identifier[new_scheme] . identifier[append] ( identifier[item] )
keyword[return] identifier[new_scheme] | def add_color_to_scheme(scheme, name, foreground, background, palette_colors):
"""Add foreground and background colours to a color scheme"""
if foreground is None and background is None:
return scheme # depends on [control=['if'], data=[]]
new_scheme = []
for item in scheme:
if item[0] == name:
if foreground is None:
foreground = item[1] # depends on [control=['if'], data=['foreground']]
if background is None:
background = item[2] # depends on [control=['if'], data=['background']]
if palette_colors > 16:
new_scheme.append((name, '', '', '', foreground, background)) # depends on [control=['if'], data=[]]
else:
new_scheme.append((name, foreground, background)) # depends on [control=['if'], data=['name']]
else:
new_scheme.append(item) # depends on [control=['for'], data=['item']]
return new_scheme |
def _help_commands(self):
""" Help on all the available commands """
help = 'Workbench Commands:'
for command in self.list_all_commands():
full_help = self.work_request('help_formatter', command)['help_formatter']['help']
compact_help = full_help.split('\n')[:2]
help += '\n\n%s' % '\n'.join(compact_help)
return help | def function[_help_commands, parameter[self]]:
constant[ Help on all the available commands ]
variable[help] assign[=] constant[Workbench Commands:]
for taget[name[command]] in starred[call[name[self].list_all_commands, parameter[]]] begin[:]
variable[full_help] assign[=] call[call[call[name[self].work_request, parameter[constant[help_formatter], name[command]]]][constant[help_formatter]]][constant[help]]
variable[compact_help] assign[=] call[call[name[full_help].split, parameter[constant[
]]]][<ast.Slice object at 0x7da20c6c6dd0>]
<ast.AugAssign object at 0x7da20c6c56c0>
return[name[help]] | keyword[def] identifier[_help_commands] ( identifier[self] ):
literal[string]
identifier[help] = literal[string]
keyword[for] identifier[command] keyword[in] identifier[self] . identifier[list_all_commands] ():
identifier[full_help] = identifier[self] . identifier[work_request] ( literal[string] , identifier[command] )[ literal[string] ][ literal[string] ]
identifier[compact_help] = identifier[full_help] . identifier[split] ( literal[string] )[: literal[int] ]
identifier[help] += literal[string] % literal[string] . identifier[join] ( identifier[compact_help] )
keyword[return] identifier[help] | def _help_commands(self):
""" Help on all the available commands """
help = 'Workbench Commands:'
for command in self.list_all_commands():
full_help = self.work_request('help_formatter', command)['help_formatter']['help']
compact_help = full_help.split('\n')[:2]
help += '\n\n%s' % '\n'.join(compact_help) # depends on [control=['for'], data=['command']]
return help |
def spec(self, postf_un_ops: str) -> list:
"""Return prefix unary operators list"""
spec = [(l + op, {'pat': self.pat(pat),
'postf': self.postf(r, postf_un_ops),
'regex': None})
for op, pat in self.styles.items()
for l, r in self.brackets]
spec[0][1]['regex'] = self.regex_pat.format(
_ops_regex(l for l, r in self.brackets),
_ops_regex(self.styles.keys())
)
return spec | def function[spec, parameter[self, postf_un_ops]]:
constant[Return prefix unary operators list]
variable[spec] assign[=] <ast.ListComp object at 0x7da212db4cd0>
call[call[call[name[spec]][constant[0]]][constant[1]]][constant[regex]] assign[=] call[name[self].regex_pat.format, parameter[call[name[_ops_regex], parameter[<ast.GeneratorExp object at 0x7da2041da0e0>]], call[name[_ops_regex], parameter[call[name[self].styles.keys, parameter[]]]]]]
return[name[spec]] | keyword[def] identifier[spec] ( identifier[self] , identifier[postf_un_ops] : identifier[str] )-> identifier[list] :
literal[string]
identifier[spec] =[( identifier[l] + identifier[op] ,{ literal[string] : identifier[self] . identifier[pat] ( identifier[pat] ),
literal[string] : identifier[self] . identifier[postf] ( identifier[r] , identifier[postf_un_ops] ),
literal[string] : keyword[None] })
keyword[for] identifier[op] , identifier[pat] keyword[in] identifier[self] . identifier[styles] . identifier[items] ()
keyword[for] identifier[l] , identifier[r] keyword[in] identifier[self] . identifier[brackets] ]
identifier[spec] [ literal[int] ][ literal[int] ][ literal[string] ]= identifier[self] . identifier[regex_pat] . identifier[format] (
identifier[_ops_regex] ( identifier[l] keyword[for] identifier[l] , identifier[r] keyword[in] identifier[self] . identifier[brackets] ),
identifier[_ops_regex] ( identifier[self] . identifier[styles] . identifier[keys] ())
)
keyword[return] identifier[spec] | def spec(self, postf_un_ops: str) -> list:
"""Return prefix unary operators list"""
spec = [(l + op, {'pat': self.pat(pat), 'postf': self.postf(r, postf_un_ops), 'regex': None}) for (op, pat) in self.styles.items() for (l, r) in self.brackets]
spec[0][1]['regex'] = self.regex_pat.format(_ops_regex((l for (l, r) in self.brackets)), _ops_regex(self.styles.keys()))
return spec |
def drawOval(self, tetra):
"""Draw an ellipse inside a tetrapod.
"""
if len(tetra) != 4:
raise ValueError("invalid arg length")
if hasattr(tetra[0], "__float__"):
q = Rect(tetra).quad
else:
q = Quad(tetra)
mt = q.ul + (q.ur - q.ul) * 0.5
mr = q.ur + (q.lr - q.ur) * 0.5
mb = q.ll + (q.lr - q.ll) * 0.5
ml = q.ul + (q.ll - q.ul) * 0.5
if not (self.lastPoint == ml):
self.draw_cont += "%g %g m\n" % JM_TUPLE(ml * self.ipctm)
self.lastPoint = ml
self.drawCurve(ml, q.ll, mb)
self.drawCurve(mb, q.lr, mr)
self.drawCurve(mr, q.ur, mt)
self.drawCurve(mt, q.ul, ml)
self.updateRect(q.rect)
self.lastPoint = ml
return self.lastPoint | def function[drawOval, parameter[self, tetra]]:
constant[Draw an ellipse inside a tetrapod.
]
if compare[call[name[len], parameter[name[tetra]]] not_equal[!=] constant[4]] begin[:]
<ast.Raise object at 0x7da1b2347af0>
if call[name[hasattr], parameter[call[name[tetra]][constant[0]], constant[__float__]]] begin[:]
variable[q] assign[=] call[name[Rect], parameter[name[tetra]]].quad
variable[mt] assign[=] binary_operation[name[q].ul + binary_operation[binary_operation[name[q].ur - name[q].ul] * constant[0.5]]]
variable[mr] assign[=] binary_operation[name[q].ur + binary_operation[binary_operation[name[q].lr - name[q].ur] * constant[0.5]]]
variable[mb] assign[=] binary_operation[name[q].ll + binary_operation[binary_operation[name[q].lr - name[q].ll] * constant[0.5]]]
variable[ml] assign[=] binary_operation[name[q].ul + binary_operation[binary_operation[name[q].ll - name[q].ul] * constant[0.5]]]
if <ast.UnaryOp object at 0x7da20c76df90> begin[:]
<ast.AugAssign object at 0x7da20c76d870>
name[self].lastPoint assign[=] name[ml]
call[name[self].drawCurve, parameter[name[ml], name[q].ll, name[mb]]]
call[name[self].drawCurve, parameter[name[mb], name[q].lr, name[mr]]]
call[name[self].drawCurve, parameter[name[mr], name[q].ur, name[mt]]]
call[name[self].drawCurve, parameter[name[mt], name[q].ul, name[ml]]]
call[name[self].updateRect, parameter[name[q].rect]]
name[self].lastPoint assign[=] name[ml]
return[name[self].lastPoint] | keyword[def] identifier[drawOval] ( identifier[self] , identifier[tetra] ):
literal[string]
keyword[if] identifier[len] ( identifier[tetra] )!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[hasattr] ( identifier[tetra] [ literal[int] ], literal[string] ):
identifier[q] = identifier[Rect] ( identifier[tetra] ). identifier[quad]
keyword[else] :
identifier[q] = identifier[Quad] ( identifier[tetra] )
identifier[mt] = identifier[q] . identifier[ul] +( identifier[q] . identifier[ur] - identifier[q] . identifier[ul] )* literal[int]
identifier[mr] = identifier[q] . identifier[ur] +( identifier[q] . identifier[lr] - identifier[q] . identifier[ur] )* literal[int]
identifier[mb] = identifier[q] . identifier[ll] +( identifier[q] . identifier[lr] - identifier[q] . identifier[ll] )* literal[int]
identifier[ml] = identifier[q] . identifier[ul] +( identifier[q] . identifier[ll] - identifier[q] . identifier[ul] )* literal[int]
keyword[if] keyword[not] ( identifier[self] . identifier[lastPoint] == identifier[ml] ):
identifier[self] . identifier[draw_cont] += literal[string] % identifier[JM_TUPLE] ( identifier[ml] * identifier[self] . identifier[ipctm] )
identifier[self] . identifier[lastPoint] = identifier[ml]
identifier[self] . identifier[drawCurve] ( identifier[ml] , identifier[q] . identifier[ll] , identifier[mb] )
identifier[self] . identifier[drawCurve] ( identifier[mb] , identifier[q] . identifier[lr] , identifier[mr] )
identifier[self] . identifier[drawCurve] ( identifier[mr] , identifier[q] . identifier[ur] , identifier[mt] )
identifier[self] . identifier[drawCurve] ( identifier[mt] , identifier[q] . identifier[ul] , identifier[ml] )
identifier[self] . identifier[updateRect] ( identifier[q] . identifier[rect] )
identifier[self] . identifier[lastPoint] = identifier[ml]
keyword[return] identifier[self] . identifier[lastPoint] | def drawOval(self, tetra):
"""Draw an ellipse inside a tetrapod.
"""
if len(tetra) != 4:
raise ValueError('invalid arg length') # depends on [control=['if'], data=[]]
if hasattr(tetra[0], '__float__'):
q = Rect(tetra).quad # depends on [control=['if'], data=[]]
else:
q = Quad(tetra)
mt = q.ul + (q.ur - q.ul) * 0.5
mr = q.ur + (q.lr - q.ur) * 0.5
mb = q.ll + (q.lr - q.ll) * 0.5
ml = q.ul + (q.ll - q.ul) * 0.5
if not self.lastPoint == ml:
self.draw_cont += '%g %g m\n' % JM_TUPLE(ml * self.ipctm)
self.lastPoint = ml # depends on [control=['if'], data=[]]
self.drawCurve(ml, q.ll, mb)
self.drawCurve(mb, q.lr, mr)
self.drawCurve(mr, q.ur, mt)
self.drawCurve(mt, q.ul, ml)
self.updateRect(q.rect)
self.lastPoint = ml
return self.lastPoint |
def _set_service_name_from_command(self, cmd):
"""Set the name of a service according to the command.
This is only relevant if the name wasn't explicitly provided.
Note that this is risky as it sets the name according to the
name of the file the command is using. If two services
use the same binary, even if their args are different, they
will be named the same.
"""
# TODO: Consider assign incremental integers to the name if a service
# with the same name already exists.
name = os.path.basename(cmd)
logger.info(
'Service name not supplied. Assigning name according to '
'executable: %s', name)
return name | def function[_set_service_name_from_command, parameter[self, cmd]]:
constant[Set the name of a service according to the command.
This is only relevant if the name wasn't explicitly provided.
Note that this is risky as it sets the name according to the
name of the file the command is using. If two services
use the same binary, even if their args are different, they
will be named the same.
]
variable[name] assign[=] call[name[os].path.basename, parameter[name[cmd]]]
call[name[logger].info, parameter[constant[Service name not supplied. Assigning name according to executable: %s], name[name]]]
return[name[name]] | keyword[def] identifier[_set_service_name_from_command] ( identifier[self] , identifier[cmd] ):
literal[string]
identifier[name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[cmd] )
identifier[logger] . identifier[info] (
literal[string]
literal[string] , identifier[name] )
keyword[return] identifier[name] | def _set_service_name_from_command(self, cmd):
"""Set the name of a service according to the command.
This is only relevant if the name wasn't explicitly provided.
Note that this is risky as it sets the name according to the
name of the file the command is using. If two services
use the same binary, even if their args are different, they
will be named the same.
"""
# TODO: Consider assign incremental integers to the name if a service
# with the same name already exists.
name = os.path.basename(cmd)
logger.info('Service name not supplied. Assigning name according to executable: %s', name)
return name |
def debug_ratelimit(g):
"""Log debug of github ratelimit information from last API call
Parameters
----------
org: github.MainClass.Github
github object
"""
assert isinstance(g, github.MainClass.Github), type(g)
debug("github ratelimit: {rl}".format(rl=g.rate_limiting)) | def function[debug_ratelimit, parameter[g]]:
constant[Log debug of github ratelimit information from last API call
Parameters
----------
org: github.MainClass.Github
github object
]
assert[call[name[isinstance], parameter[name[g], name[github].MainClass.Github]]]
call[name[debug], parameter[call[constant[github ratelimit: {rl}].format, parameter[]]]] | keyword[def] identifier[debug_ratelimit] ( identifier[g] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[g] , identifier[github] . identifier[MainClass] . identifier[Github] ), identifier[type] ( identifier[g] )
identifier[debug] ( literal[string] . identifier[format] ( identifier[rl] = identifier[g] . identifier[rate_limiting] )) | def debug_ratelimit(g):
"""Log debug of github ratelimit information from last API call
Parameters
----------
org: github.MainClass.Github
github object
"""
assert isinstance(g, github.MainClass.Github), type(g)
debug('github ratelimit: {rl}'.format(rl=g.rate_limiting)) |
def predict_topk(self, dataset, output_type="probability", k=3,
batch_size=None):
"""
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `id`,
`class`, and `probability` or `rank`, depending on the ``output_type``
parameter.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image | list
Drawings to be classified.
If dataset is an SFrame, it must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'probability', 'rank'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the
prediction.
- `rank` : Rank associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+----+-------+-------------------+
| id | class | probability |
+----+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| .. | ... | ... |
+----+-------+-------------------+
[35688 rows x 3 columns]
"""
_tkutl._check_categorical_option_type("output_type", output_type,
["probability", "rank"])
if not isinstance(k, int):
raise TypeError("'k' must be an integer >= 1")
if k <= 0:
raise ValueError("'k' must be >= 1")
if batch_size is not None and not isinstance(batch_size, int):
raise TypeError("'batch_size' must be an integer >= 1")
if batch_size is not None and batch_size < 1:
raise ValueError("'batch_size' must be >= 1")
prob_vector = self.predict(
dataset, output_type='probability_vector', batch_size=batch_size)
classes = self.classes
if output_type == 'probability':
results = prob_vector.apply(lambda p: [
{'class': classes[i], 'probability': p[i]}
for i in reversed(_np.argsort(p)[-k:])]
)
else:
assert(output_type == 'rank')
results = prob_vector.apply(lambda p: [
{'class': classes[index], 'rank': rank}
for rank, index in enumerate(reversed(_np.argsort(p)[-k:]))]
)
results = _tc.SFrame({'X': results})
results = results.add_row_number()
results = results.stack('X', new_column_name='X')
results = results.unpack('X', column_name_prefix='')
return results | def function[predict_topk, parameter[self, dataset, output_type, k, batch_size]]:
constant[
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `id`,
`class`, and `probability` or `rank`, depending on the ``output_type``
parameter.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image | list
Drawings to be classified.
If dataset is an SFrame, it must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'probability', 'rank'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the
prediction.
- `rank` : Rank associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+----+-------+-------------------+
| id | class | probability |
+----+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| .. | ... | ... |
+----+-------+-------------------+
[35688 rows x 3 columns]
]
call[name[_tkutl]._check_categorical_option_type, parameter[constant[output_type], name[output_type], list[[<ast.Constant object at 0x7da1b21d3bb0>, <ast.Constant object at 0x7da1b21d3b80>]]]]
if <ast.UnaryOp object at 0x7da1b21d3af0> begin[:]
<ast.Raise object at 0x7da1b21d3a00>
if compare[name[k] less_or_equal[<=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b21d3880>
if <ast.BoolOp object at 0x7da1b21d3790> begin[:]
<ast.Raise object at 0x7da1b21d35e0>
if <ast.BoolOp object at 0x7da1b21d34f0> begin[:]
<ast.Raise object at 0x7da1b21d33a0>
variable[prob_vector] assign[=] call[name[self].predict, parameter[name[dataset]]]
variable[classes] assign[=] name[self].classes
if compare[name[output_type] equal[==] constant[probability]] begin[:]
variable[results] assign[=] call[name[prob_vector].apply, parameter[<ast.Lambda object at 0x7da1b21d0100>]]
variable[results] assign[=] call[name[_tc].SFrame, parameter[dictionary[[<ast.Constant object at 0x7da1b21d0df0>], [<ast.Name object at 0x7da1b21d0e20>]]]]
variable[results] assign[=] call[name[results].add_row_number, parameter[]]
variable[results] assign[=] call[name[results].stack, parameter[constant[X]]]
variable[results] assign[=] call[name[results].unpack, parameter[constant[X]]]
return[name[results]] | keyword[def] identifier[predict_topk] ( identifier[self] , identifier[dataset] , identifier[output_type] = literal[string] , identifier[k] = literal[int] ,
identifier[batch_size] = keyword[None] ):
literal[string]
identifier[_tkutl] . identifier[_check_categorical_option_type] ( literal[string] , identifier[output_type] ,
[ literal[string] , literal[string] ])
keyword[if] keyword[not] identifier[isinstance] ( identifier[k] , identifier[int] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[k] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[batch_size] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[batch_size] , identifier[int] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[batch_size] keyword[is] keyword[not] keyword[None] keyword[and] identifier[batch_size] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[prob_vector] = identifier[self] . identifier[predict] (
identifier[dataset] , identifier[output_type] = literal[string] , identifier[batch_size] = identifier[batch_size] )
identifier[classes] = identifier[self] . identifier[classes]
keyword[if] identifier[output_type] == literal[string] :
identifier[results] = identifier[prob_vector] . identifier[apply] ( keyword[lambda] identifier[p] :[
{ literal[string] : identifier[classes] [ identifier[i] ], literal[string] : identifier[p] [ identifier[i] ]}
keyword[for] identifier[i] keyword[in] identifier[reversed] ( identifier[_np] . identifier[argsort] ( identifier[p] )[- identifier[k] :])]
)
keyword[else] :
keyword[assert] ( identifier[output_type] == literal[string] )
identifier[results] = identifier[prob_vector] . identifier[apply] ( keyword[lambda] identifier[p] :[
{ literal[string] : identifier[classes] [ identifier[index] ], literal[string] : identifier[rank] }
keyword[for] identifier[rank] , identifier[index] keyword[in] identifier[enumerate] ( identifier[reversed] ( identifier[_np] . identifier[argsort] ( identifier[p] )[- identifier[k] :]))]
)
identifier[results] = identifier[_tc] . identifier[SFrame] ({ literal[string] : identifier[results] })
identifier[results] = identifier[results] . identifier[add_row_number] ()
identifier[results] = identifier[results] . identifier[stack] ( literal[string] , identifier[new_column_name] = literal[string] )
identifier[results] = identifier[results] . identifier[unpack] ( literal[string] , identifier[column_name_prefix] = literal[string] )
keyword[return] identifier[results] | def predict_topk(self, dataset, output_type='probability', k=3, batch_size=None):
"""
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `id`,
`class`, and `probability` or `rank`, depending on the ``output_type``
parameter.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image | list
Drawings to be classified.
If dataset is an SFrame, it must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'probability', 'rank'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the
prediction.
- `rank` : Rank associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+----+-------+-------------------+
| id | class | probability |
+----+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| .. | ... | ... |
+----+-------+-------------------+
[35688 rows x 3 columns]
"""
_tkutl._check_categorical_option_type('output_type', output_type, ['probability', 'rank'])
if not isinstance(k, int):
raise TypeError("'k' must be an integer >= 1") # depends on [control=['if'], data=[]]
if k <= 0:
raise ValueError("'k' must be >= 1") # depends on [control=['if'], data=[]]
if batch_size is not None and (not isinstance(batch_size, int)):
raise TypeError("'batch_size' must be an integer >= 1") # depends on [control=['if'], data=[]]
if batch_size is not None and batch_size < 1:
raise ValueError("'batch_size' must be >= 1") # depends on [control=['if'], data=[]]
prob_vector = self.predict(dataset, output_type='probability_vector', batch_size=batch_size)
classes = self.classes
if output_type == 'probability':
results = prob_vector.apply(lambda p: [{'class': classes[i], 'probability': p[i]} for i in reversed(_np.argsort(p)[-k:])]) # depends on [control=['if'], data=[]]
else:
assert output_type == 'rank'
results = prob_vector.apply(lambda p: [{'class': classes[index], 'rank': rank} for (rank, index) in enumerate(reversed(_np.argsort(p)[-k:]))])
results = _tc.SFrame({'X': results})
results = results.add_row_number()
results = results.stack('X', new_column_name='X')
results = results.unpack('X', column_name_prefix='')
return results |
def from_array(array):
"""
Deserialize a new KeyboardButton from a given dictionary.
:return: new KeyboardButton instance.
:rtype: KeyboardButton
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['text'] = u(array.get('text'))
data['request_contact'] = bool(array.get('request_contact')) if array.get('request_contact') is not None else None
data['request_location'] = bool(array.get('request_location')) if array.get('request_location') is not None else None
instance = KeyboardButton(**data)
instance._raw = array
return instance | def function[from_array, parameter[array]]:
constant[
Deserialize a new KeyboardButton from a given dictionary.
:return: new KeyboardButton instance.
:rtype: KeyboardButton
]
if <ast.BoolOp object at 0x7da1b04fc820> begin[:]
return[constant[None]]
call[name[assert_type_or_raise], parameter[name[array], name[dict]]]
variable[data] assign[=] dictionary[[], []]
call[name[data]][constant[text]] assign[=] call[name[u], parameter[call[name[array].get, parameter[constant[text]]]]]
call[name[data]][constant[request_contact]] assign[=] <ast.IfExp object at 0x7da1b04fc490>
call[name[data]][constant[request_location]] assign[=] <ast.IfExp object at 0x7da1b04fc0d0>
variable[instance] assign[=] call[name[KeyboardButton], parameter[]]
name[instance]._raw assign[=] name[array]
return[name[instance]] | keyword[def] identifier[from_array] ( identifier[array] ):
literal[string]
keyword[if] identifier[array] keyword[is] keyword[None] keyword[or] keyword[not] identifier[array] :
keyword[return] keyword[None]
identifier[assert_type_or_raise] ( identifier[array] , identifier[dict] , identifier[parameter_name] = literal[string] )
identifier[data] ={}
identifier[data] [ literal[string] ]= identifier[u] ( identifier[array] . identifier[get] ( literal[string] ))
identifier[data] [ literal[string] ]= identifier[bool] ( identifier[array] . identifier[get] ( literal[string] )) keyword[if] identifier[array] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] keyword[else] keyword[None]
identifier[data] [ literal[string] ]= identifier[bool] ( identifier[array] . identifier[get] ( literal[string] )) keyword[if] identifier[array] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] keyword[else] keyword[None]
identifier[instance] = identifier[KeyboardButton] (** identifier[data] )
identifier[instance] . identifier[_raw] = identifier[array]
keyword[return] identifier[instance] | def from_array(array):
"""
Deserialize a new KeyboardButton from a given dictionary.
:return: new KeyboardButton instance.
:rtype: KeyboardButton
"""
if array is None or not array:
return None # depends on [control=['if'], data=[]]
# end if
assert_type_or_raise(array, dict, parameter_name='array')
data = {}
data['text'] = u(array.get('text'))
data['request_contact'] = bool(array.get('request_contact')) if array.get('request_contact') is not None else None
data['request_location'] = bool(array.get('request_location')) if array.get('request_location') is not None else None
instance = KeyboardButton(**data)
instance._raw = array
return instance |
def stop(self):
"""
Instructs the kernel process to stop channels
and the kernel manager to then shutdown the process.
"""
logger.debug('Stopping kernel')
self.kc.stop_channels()
self.km.shutdown_kernel(now=True)
del self.km | def function[stop, parameter[self]]:
constant[
Instructs the kernel process to stop channels
and the kernel manager to then shutdown the process.
]
call[name[logger].debug, parameter[constant[Stopping kernel]]]
call[name[self].kc.stop_channels, parameter[]]
call[name[self].km.shutdown_kernel, parameter[]]
<ast.Delete object at 0x7da1b1e15810> | keyword[def] identifier[stop] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[kc] . identifier[stop_channels] ()
identifier[self] . identifier[km] . identifier[shutdown_kernel] ( identifier[now] = keyword[True] )
keyword[del] identifier[self] . identifier[km] | def stop(self):
"""
Instructs the kernel process to stop channels
and the kernel manager to then shutdown the process.
"""
logger.debug('Stopping kernel')
self.kc.stop_channels()
self.km.shutdown_kernel(now=True)
del self.km |
def get_instance(self, payload):
"""
Build an instance of FieldValueInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueInstance
:rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueInstance
"""
return FieldValueInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
field_type_sid=self._solution['field_type_sid'],
) | def function[get_instance, parameter[self, payload]]:
constant[
Build an instance of FieldValueInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueInstance
:rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueInstance
]
return[call[name[FieldValueInstance], parameter[name[self]._version, name[payload]]]] | keyword[def] identifier[get_instance] ( identifier[self] , identifier[payload] ):
literal[string]
keyword[return] identifier[FieldValueInstance] (
identifier[self] . identifier[_version] ,
identifier[payload] ,
identifier[assistant_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
identifier[field_type_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
) | def get_instance(self, payload):
"""
Build an instance of FieldValueInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueInstance
:rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueInstance
"""
return FieldValueInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], field_type_sid=self._solution['field_type_sid']) |
def _widget_from_p(self, pname, ptype):
"""Returns a widget from its ptype and pname"""
widget_name = self.type2widget[ptype].__name__.lower()
widget_name = "_".join([widget_name, pname])
return getattr(self, widget_name) | def function[_widget_from_p, parameter[self, pname, ptype]]:
constant[Returns a widget from its ptype and pname]
variable[widget_name] assign[=] call[call[name[self].type2widget][name[ptype]].__name__.lower, parameter[]]
variable[widget_name] assign[=] call[constant[_].join, parameter[list[[<ast.Name object at 0x7da1b15188b0>, <ast.Name object at 0x7da1b151b910>]]]]
return[call[name[getattr], parameter[name[self], name[widget_name]]]] | keyword[def] identifier[_widget_from_p] ( identifier[self] , identifier[pname] , identifier[ptype] ):
literal[string]
identifier[widget_name] = identifier[self] . identifier[type2widget] [ identifier[ptype] ]. identifier[__name__] . identifier[lower] ()
identifier[widget_name] = literal[string] . identifier[join] ([ identifier[widget_name] , identifier[pname] ])
keyword[return] identifier[getattr] ( identifier[self] , identifier[widget_name] ) | def _widget_from_p(self, pname, ptype):
"""Returns a widget from its ptype and pname"""
widget_name = self.type2widget[ptype].__name__.lower()
widget_name = '_'.join([widget_name, pname])
return getattr(self, widget_name) |
def get_last_scene_id(self, refresh=False):
"""Get last scene id.
Refresh data from Vera if refresh is True, otherwise use local cache.
Refresh is only needed if you're not using subscriptions.
"""
if refresh:
self.refresh_complex_value('LastSceneID')
self.refresh_complex_value('sl_CentralScene')
val = self.get_complex_value('LastSceneID') or self.get_complex_value('sl_CentralScene')
return val | def function[get_last_scene_id, parameter[self, refresh]]:
constant[Get last scene id.
Refresh data from Vera if refresh is True, otherwise use local cache.
Refresh is only needed if you're not using subscriptions.
]
if name[refresh] begin[:]
call[name[self].refresh_complex_value, parameter[constant[LastSceneID]]]
call[name[self].refresh_complex_value, parameter[constant[sl_CentralScene]]]
variable[val] assign[=] <ast.BoolOp object at 0x7da1b11067d0>
return[name[val]] | keyword[def] identifier[get_last_scene_id] ( identifier[self] , identifier[refresh] = keyword[False] ):
literal[string]
keyword[if] identifier[refresh] :
identifier[self] . identifier[refresh_complex_value] ( literal[string] )
identifier[self] . identifier[refresh_complex_value] ( literal[string] )
identifier[val] = identifier[self] . identifier[get_complex_value] ( literal[string] ) keyword[or] identifier[self] . identifier[get_complex_value] ( literal[string] )
keyword[return] identifier[val] | def get_last_scene_id(self, refresh=False):
"""Get last scene id.
Refresh data from Vera if refresh is True, otherwise use local cache.
Refresh is only needed if you're not using subscriptions.
"""
if refresh:
self.refresh_complex_value('LastSceneID')
self.refresh_complex_value('sl_CentralScene') # depends on [control=['if'], data=[]]
val = self.get_complex_value('LastSceneID') or self.get_complex_value('sl_CentralScene')
return val |
def close(self):
"""Shuts down the thread."""
if not self.closed:
log.debug("Closing worker thread")
self.closed = True
if self._wait:
self._wait.set() | def function[close, parameter[self]]:
constant[Shuts down the thread.]
if <ast.UnaryOp object at 0x7da204567370> begin[:]
call[name[log].debug, parameter[constant[Closing worker thread]]]
name[self].closed assign[=] constant[True]
if name[self]._wait begin[:]
call[name[self]._wait.set, parameter[]] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[closed] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[self] . identifier[closed] = keyword[True]
keyword[if] identifier[self] . identifier[_wait] :
identifier[self] . identifier[_wait] . identifier[set] () | def close(self):
"""Shuts down the thread."""
if not self.closed:
log.debug('Closing worker thread') # depends on [control=['if'], data=[]]
self.closed = True
if self._wait:
self._wait.set() # depends on [control=['if'], data=[]] |
def el_is_empty(el):
"""Return ``True`` if tuple ``el`` represents an empty XML element."""
if len(el) == 1 and not isinstance(el[0], (list, tuple)):
return True
subels_are_empty = []
for subel in el:
if isinstance(subel, (list, tuple)):
subels_are_empty.append(el_is_empty(subel))
else:
subels_are_empty.append(not bool(subel))
return all(subels_are_empty) | def function[el_is_empty, parameter[el]]:
constant[Return ``True`` if tuple ``el`` represents an empty XML element.]
if <ast.BoolOp object at 0x7da1b1c11390> begin[:]
return[constant[True]]
variable[subels_are_empty] assign[=] list[[]]
for taget[name[subel]] in starred[name[el]] begin[:]
if call[name[isinstance], parameter[name[subel], tuple[[<ast.Name object at 0x7da18ede6aa0>, <ast.Name object at 0x7da18ede7e80>]]]] begin[:]
call[name[subels_are_empty].append, parameter[call[name[el_is_empty], parameter[name[subel]]]]]
return[call[name[all], parameter[name[subels_are_empty]]]] | keyword[def] identifier[el_is_empty] ( identifier[el] ):
literal[string]
keyword[if] identifier[len] ( identifier[el] )== literal[int] keyword[and] keyword[not] identifier[isinstance] ( identifier[el] [ literal[int] ],( identifier[list] , identifier[tuple] )):
keyword[return] keyword[True]
identifier[subels_are_empty] =[]
keyword[for] identifier[subel] keyword[in] identifier[el] :
keyword[if] identifier[isinstance] ( identifier[subel] ,( identifier[list] , identifier[tuple] )):
identifier[subels_are_empty] . identifier[append] ( identifier[el_is_empty] ( identifier[subel] ))
keyword[else] :
identifier[subels_are_empty] . identifier[append] ( keyword[not] identifier[bool] ( identifier[subel] ))
keyword[return] identifier[all] ( identifier[subels_are_empty] ) | def el_is_empty(el):
"""Return ``True`` if tuple ``el`` represents an empty XML element."""
if len(el) == 1 and (not isinstance(el[0], (list, tuple))):
return True # depends on [control=['if'], data=[]]
subels_are_empty = []
for subel in el:
if isinstance(subel, (list, tuple)):
subels_are_empty.append(el_is_empty(subel)) # depends on [control=['if'], data=[]]
else:
subels_are_empty.append(not bool(subel)) # depends on [control=['for'], data=['subel']]
return all(subels_are_empty) |
def _filter_from_dict(current: Dict[str, Any]) -> Dict[str, Any]:
"""Takes in a nested dictionary as a filter and returns a flattened filter dictionary"""
filter_ = dict()
for k, v in current.items():
if isinstance(v, dict):
for sub, v2 in _filter_from_dict(v).items():
filter_[f'{k}.{sub}'] = v2
else:
filter_[k] = v
return filter_ | def function[_filter_from_dict, parameter[current]]:
constant[Takes in a nested dictionary as a filter and returns a flattened filter dictionary]
variable[filter_] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b170a560>, <ast.Name object at 0x7da1b170a530>]]] in starred[call[name[current].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[v], name[dict]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b170b1f0>, <ast.Name object at 0x7da1b170b250>]]] in starred[call[call[name[_filter_from_dict], parameter[name[v]]].items, parameter[]]] begin[:]
call[name[filter_]][<ast.JoinedStr object at 0x7da1b17099f0>] assign[=] name[v2]
return[name[filter_]] | keyword[def] identifier[_filter_from_dict] ( identifier[current] : identifier[Dict] [ identifier[str] , identifier[Any] ])-> identifier[Dict] [ identifier[str] , identifier[Any] ]:
literal[string]
identifier[filter_] = identifier[dict] ()
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[current] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[v] , identifier[dict] ):
keyword[for] identifier[sub] , identifier[v2] keyword[in] identifier[_filter_from_dict] ( identifier[v] ). identifier[items] ():
identifier[filter_] [ literal[string] ]= identifier[v2]
keyword[else] :
identifier[filter_] [ identifier[k] ]= identifier[v]
keyword[return] identifier[filter_] | def _filter_from_dict(current: Dict[str, Any]) -> Dict[str, Any]:
"""Takes in a nested dictionary as a filter and returns a flattened filter dictionary"""
filter_ = dict()
for (k, v) in current.items():
if isinstance(v, dict):
for (sub, v2) in _filter_from_dict(v).items():
filter_[f'{k}.{sub}'] = v2 # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
filter_[k] = v # depends on [control=['for'], data=[]]
return filter_ |
def parsesingle(s, strictmode=True, expansionlimit=None, convertpos=False):
'''like parse, but only consumes a single top level node, e.g. parsing
'a\nb' will only return a node for 'a', leaving b unparsed'''
p = _parser(s, strictmode=strictmode, expansionlimit=expansionlimit)
tree = p.parse()
if convertpos:
ast.posconverter(s).visit(tree)
return tree | def function[parsesingle, parameter[s, strictmode, expansionlimit, convertpos]]:
constant[like parse, but only consumes a single top level node, e.g. parsing
'a
b' will only return a node for 'a', leaving b unparsed]
variable[p] assign[=] call[name[_parser], parameter[name[s]]]
variable[tree] assign[=] call[name[p].parse, parameter[]]
if name[convertpos] begin[:]
call[call[name[ast].posconverter, parameter[name[s]]].visit, parameter[name[tree]]]
return[name[tree]] | keyword[def] identifier[parsesingle] ( identifier[s] , identifier[strictmode] = keyword[True] , identifier[expansionlimit] = keyword[None] , identifier[convertpos] = keyword[False] ):
literal[string]
identifier[p] = identifier[_parser] ( identifier[s] , identifier[strictmode] = identifier[strictmode] , identifier[expansionlimit] = identifier[expansionlimit] )
identifier[tree] = identifier[p] . identifier[parse] ()
keyword[if] identifier[convertpos] :
identifier[ast] . identifier[posconverter] ( identifier[s] ). identifier[visit] ( identifier[tree] )
keyword[return] identifier[tree] | def parsesingle(s, strictmode=True, expansionlimit=None, convertpos=False):
"""like parse, but only consumes a single top level node, e.g. parsing
'a
b' will only return a node for 'a', leaving b unparsed"""
p = _parser(s, strictmode=strictmode, expansionlimit=expansionlimit)
tree = p.parse()
if convertpos:
ast.posconverter(s).visit(tree) # depends on [control=['if'], data=[]]
return tree |
def drift(stack, region, profile):
"""
Produce a CloudFormation drift report for the given stack.
"""
logging.debug('finding drift - stack: {}'.format(stack))
logging.debug('region: {}'.format(region))
logging.debug('profile: {}'.format(profile))
tool = DriftTool(
Stack=stack,
Region=region,
Profile=profile,
Verbose=True
)
if tool.determine_drift():
sys.exit(0)
else:
sys.exit(1) | def function[drift, parameter[stack, region, profile]]:
constant[
Produce a CloudFormation drift report for the given stack.
]
call[name[logging].debug, parameter[call[constant[finding drift - stack: {}].format, parameter[name[stack]]]]]
call[name[logging].debug, parameter[call[constant[region: {}].format, parameter[name[region]]]]]
call[name[logging].debug, parameter[call[constant[profile: {}].format, parameter[name[profile]]]]]
variable[tool] assign[=] call[name[DriftTool], parameter[]]
if call[name[tool].determine_drift, parameter[]] begin[:]
call[name[sys].exit, parameter[constant[0]]] | keyword[def] identifier[drift] ( identifier[stack] , identifier[region] , identifier[profile] ):
literal[string]
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[stack] ))
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[region] ))
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[profile] ))
identifier[tool] = identifier[DriftTool] (
identifier[Stack] = identifier[stack] ,
identifier[Region] = identifier[region] ,
identifier[Profile] = identifier[profile] ,
identifier[Verbose] = keyword[True]
)
keyword[if] identifier[tool] . identifier[determine_drift] ():
identifier[sys] . identifier[exit] ( literal[int] )
keyword[else] :
identifier[sys] . identifier[exit] ( literal[int] ) | def drift(stack, region, profile):
"""
Produce a CloudFormation drift report for the given stack.
"""
logging.debug('finding drift - stack: {}'.format(stack))
logging.debug('region: {}'.format(region))
logging.debug('profile: {}'.format(profile))
tool = DriftTool(Stack=stack, Region=region, Profile=profile, Verbose=True)
if tool.determine_drift():
sys.exit(0) # depends on [control=['if'], data=[]]
else:
sys.exit(1) |
def integer_byte_size(n):
'''Returns the number of bytes necessary to store the integer n.'''
quanta, mod = divmod(integer_bit_size(n), 8)
if mod or n == 0:
quanta += 1
return quanta | def function[integer_byte_size, parameter[n]]:
constant[Returns the number of bytes necessary to store the integer n.]
<ast.Tuple object at 0x7da204566c50> assign[=] call[name[divmod], parameter[call[name[integer_bit_size], parameter[name[n]]], constant[8]]]
if <ast.BoolOp object at 0x7da204567760> begin[:]
<ast.AugAssign object at 0x7da204564970>
return[name[quanta]] | keyword[def] identifier[integer_byte_size] ( identifier[n] ):
literal[string]
identifier[quanta] , identifier[mod] = identifier[divmod] ( identifier[integer_bit_size] ( identifier[n] ), literal[int] )
keyword[if] identifier[mod] keyword[or] identifier[n] == literal[int] :
identifier[quanta] += literal[int]
keyword[return] identifier[quanta] | def integer_byte_size(n):
"""Returns the number of bytes necessary to store the integer n."""
(quanta, mod) = divmod(integer_bit_size(n), 8)
if mod or n == 0:
quanta += 1 # depends on [control=['if'], data=[]]
return quanta |
def new_preload():
"""Job running prior to builds - fetches TestRunner image"""
testrunner_image = get_testrunner_image()
local_image = testrunner_image.rsplit(':')[-2].rsplit('/')[-1]
version_file = 'testrunner_version.txt'
template = yaml.safe_load(f"""
machine:
image: 'circleci/classic:201710-02'
steps:
- run:
name: AWS login
command: |-
login="$(aws ecr get-login --no-include-email)"
${{login}}
- run:
name: Pull TestRunner Image
command: docker pull {testrunner_image}
- run:
name: Make cache directory
command: mkdir -p {cache_dir}
- run:
name: Obtain Testrunner Version
command: echo $(docker run {testrunner_image} python -m trunner --version) > {cache_dir}/{version_file}
- run:
name: Export docker image layer cache
command: docker save -o {cache_dir}/{testrunner_cache} {testrunner_image}
- persist_to_workspace: # workspace is used later in this same build
root: {cache_dir}
paths: '{testrunner_cache}'
- persist_to_workspace: # workspace is used later in this same build
root: {cache_dir}
paths: '{version_file}'
- store_artifacts:
path: {version_file}
""")
return 'preload', template | def function[new_preload, parameter[]]:
constant[Job running prior to builds - fetches TestRunner image]
variable[testrunner_image] assign[=] call[name[get_testrunner_image], parameter[]]
variable[local_image] assign[=] call[call[call[call[name[testrunner_image].rsplit, parameter[constant[:]]]][<ast.UnaryOp object at 0x7da1b04d7310>].rsplit, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da1b04d4a60>]
variable[version_file] assign[=] constant[testrunner_version.txt]
variable[template] assign[=] call[name[yaml].safe_load, parameter[<ast.JoinedStr object at 0x7da1b04d4190>]]
return[tuple[[<ast.Constant object at 0x7da1b04d6e90>, <ast.Name object at 0x7da1b04d6ce0>]]] | keyword[def] identifier[new_preload] ():
literal[string]
identifier[testrunner_image] = identifier[get_testrunner_image] ()
identifier[local_image] = identifier[testrunner_image] . identifier[rsplit] ( literal[string] )[- literal[int] ]. identifier[rsplit] ( literal[string] )[- literal[int] ]
identifier[version_file] = literal[string]
identifier[template] = identifier[yaml] . identifier[safe_load] ( literal[string] )
keyword[return] literal[string] , identifier[template] | def new_preload():
"""Job running prior to builds - fetches TestRunner image"""
testrunner_image = get_testrunner_image()
local_image = testrunner_image.rsplit(':')[-2].rsplit('/')[-1]
version_file = 'testrunner_version.txt'
template = yaml.safe_load(f"""\n machine:\n image: 'circleci/classic:201710-02'\n steps:\n - run:\n name: AWS login\n command: |-\n login="$(aws ecr get-login --no-include-email)"\n ${{login}}\n - run:\n name: Pull TestRunner Image\n command: docker pull {testrunner_image}\n - run:\n name: Make cache directory\n command: mkdir -p {cache_dir}\n - run:\n name: Obtain Testrunner Version\n command: echo $(docker run {testrunner_image} python -m trunner --version) > {cache_dir}/{version_file}\n - run:\n name: Export docker image layer cache\n command: docker save -o {cache_dir}/{testrunner_cache} {testrunner_image}\n - persist_to_workspace: # workspace is used later in this same build\n root: {cache_dir}\n paths: '{testrunner_cache}'\n - persist_to_workspace: # workspace is used later in this same build\n root: {cache_dir}\n paths: '{version_file}'\n - store_artifacts:\n path: {version_file}\n """)
return ('preload', template) |
def _get_persistent_modules():
'''
Returns a list of modules in loader.conf that load on boot.
'''
mods = set()
with salt.utils.files.fopen(_LOADER_CONF, 'r') as loader_conf:
for line in loader_conf:
line = salt.utils.stringutils.to_unicode(line)
line = line.strip()
mod_name = _get_module_name(line)
if mod_name:
mods.add(mod_name)
return mods | def function[_get_persistent_modules, parameter[]]:
constant[
Returns a list of modules in loader.conf that load on boot.
]
variable[mods] assign[=] call[name[set], parameter[]]
with call[name[salt].utils.files.fopen, parameter[name[_LOADER_CONF], constant[r]]] begin[:]
for taget[name[line]] in starred[name[loader_conf]] begin[:]
variable[line] assign[=] call[name[salt].utils.stringutils.to_unicode, parameter[name[line]]]
variable[line] assign[=] call[name[line].strip, parameter[]]
variable[mod_name] assign[=] call[name[_get_module_name], parameter[name[line]]]
if name[mod_name] begin[:]
call[name[mods].add, parameter[name[mod_name]]]
return[name[mods]] | keyword[def] identifier[_get_persistent_modules] ():
literal[string]
identifier[mods] = identifier[set] ()
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[_LOADER_CONF] , literal[string] ) keyword[as] identifier[loader_conf] :
keyword[for] identifier[line] keyword[in] identifier[loader_conf] :
identifier[line] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[line] )
identifier[line] = identifier[line] . identifier[strip] ()
identifier[mod_name] = identifier[_get_module_name] ( identifier[line] )
keyword[if] identifier[mod_name] :
identifier[mods] . identifier[add] ( identifier[mod_name] )
keyword[return] identifier[mods] | def _get_persistent_modules():
"""
Returns a list of modules in loader.conf that load on boot.
"""
mods = set()
with salt.utils.files.fopen(_LOADER_CONF, 'r') as loader_conf:
for line in loader_conf:
line = salt.utils.stringutils.to_unicode(line)
line = line.strip()
mod_name = _get_module_name(line)
if mod_name:
mods.add(mod_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['loader_conf']]
return mods |
def _cache_translation_needs_fallback(instance, language_code, related_name, timeout=cache.default_timeout):
"""
Store the fact that a translation doesn't exist, and the fallback should be used.
"""
if not appsettings.PARLER_ENABLE_CACHING or not instance.pk or instance._state.adding:
return
tr_model = instance._parler_meta.get_model_by_related_name(related_name)
key = get_translation_cache_key(tr_model, instance.pk, language_code)
cache.set(key, {'__FALLBACK__': True}, timeout=timeout) | def function[_cache_translation_needs_fallback, parameter[instance, language_code, related_name, timeout]]:
constant[
Store the fact that a translation doesn't exist, and the fallback should be used.
]
if <ast.BoolOp object at 0x7da18ede43a0> begin[:]
return[None]
variable[tr_model] assign[=] call[name[instance]._parler_meta.get_model_by_related_name, parameter[name[related_name]]]
variable[key] assign[=] call[name[get_translation_cache_key], parameter[name[tr_model], name[instance].pk, name[language_code]]]
call[name[cache].set, parameter[name[key], dictionary[[<ast.Constant object at 0x7da18ede7b20>], [<ast.Constant object at 0x7da18ede62c0>]]]] | keyword[def] identifier[_cache_translation_needs_fallback] ( identifier[instance] , identifier[language_code] , identifier[related_name] , identifier[timeout] = identifier[cache] . identifier[default_timeout] ):
literal[string]
keyword[if] keyword[not] identifier[appsettings] . identifier[PARLER_ENABLE_CACHING] keyword[or] keyword[not] identifier[instance] . identifier[pk] keyword[or] identifier[instance] . identifier[_state] . identifier[adding] :
keyword[return]
identifier[tr_model] = identifier[instance] . identifier[_parler_meta] . identifier[get_model_by_related_name] ( identifier[related_name] )
identifier[key] = identifier[get_translation_cache_key] ( identifier[tr_model] , identifier[instance] . identifier[pk] , identifier[language_code] )
identifier[cache] . identifier[set] ( identifier[key] ,{ literal[string] : keyword[True] }, identifier[timeout] = identifier[timeout] ) | def _cache_translation_needs_fallback(instance, language_code, related_name, timeout=cache.default_timeout):
"""
Store the fact that a translation doesn't exist, and the fallback should be used.
"""
if not appsettings.PARLER_ENABLE_CACHING or not instance.pk or instance._state.adding:
return # depends on [control=['if'], data=[]]
tr_model = instance._parler_meta.get_model_by_related_name(related_name)
key = get_translation_cache_key(tr_model, instance.pk, language_code)
cache.set(key, {'__FALLBACK__': True}, timeout=timeout) |
def handle_container_output_args(options, parser):
"""Handle the options specified by add_container_output_args().
@return: a dict that can be used as kwargs for the ContainerExecutor.execute_run()
"""
if options.result_files:
result_files_patterns = [os.path.normpath(p) for p in options.result_files if p]
for pattern in result_files_patterns:
if pattern.startswith(".."):
parser.error("Invalid relative result-files pattern '{}'.".format(pattern))
else:
result_files_patterns = ["."]
output_dir = options.output_directory
if os.path.exists(output_dir) and not os.path.isdir(output_dir):
parser.error("Output directory '{}' must not refer to an existing file.".format(output_dir))
return {
'output_dir': output_dir,
'result_files_patterns': result_files_patterns,
} | def function[handle_container_output_args, parameter[options, parser]]:
constant[Handle the options specified by add_container_output_args().
@return: a dict that can be used as kwargs for the ContainerExecutor.execute_run()
]
if name[options].result_files begin[:]
variable[result_files_patterns] assign[=] <ast.ListComp object at 0x7da18eb56b00>
for taget[name[pattern]] in starred[name[result_files_patterns]] begin[:]
if call[name[pattern].startswith, parameter[constant[..]]] begin[:]
call[name[parser].error, parameter[call[constant[Invalid relative result-files pattern '{}'.].format, parameter[name[pattern]]]]]
variable[output_dir] assign[=] name[options].output_directory
if <ast.BoolOp object at 0x7da18eb571f0> begin[:]
call[name[parser].error, parameter[call[constant[Output directory '{}' must not refer to an existing file.].format, parameter[name[output_dir]]]]]
return[dictionary[[<ast.Constant object at 0x7da204567070>, <ast.Constant object at 0x7da204564160>], [<ast.Name object at 0x7da204566080>, <ast.Name object at 0x7da204564ca0>]]] | keyword[def] identifier[handle_container_output_args] ( identifier[options] , identifier[parser] ):
literal[string]
keyword[if] identifier[options] . identifier[result_files] :
identifier[result_files_patterns] =[ identifier[os] . identifier[path] . identifier[normpath] ( identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[options] . identifier[result_files] keyword[if] identifier[p] ]
keyword[for] identifier[pattern] keyword[in] identifier[result_files_patterns] :
keyword[if] identifier[pattern] . identifier[startswith] ( literal[string] ):
identifier[parser] . identifier[error] ( literal[string] . identifier[format] ( identifier[pattern] ))
keyword[else] :
identifier[result_files_patterns] =[ literal[string] ]
identifier[output_dir] = identifier[options] . identifier[output_directory]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[output_dir] ) keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[output_dir] ):
identifier[parser] . identifier[error] ( literal[string] . identifier[format] ( identifier[output_dir] ))
keyword[return] {
literal[string] : identifier[output_dir] ,
literal[string] : identifier[result_files_patterns] ,
} | def handle_container_output_args(options, parser):
"""Handle the options specified by add_container_output_args().
@return: a dict that can be used as kwargs for the ContainerExecutor.execute_run()
"""
if options.result_files:
result_files_patterns = [os.path.normpath(p) for p in options.result_files if p]
for pattern in result_files_patterns:
if pattern.startswith('..'):
parser.error("Invalid relative result-files pattern '{}'.".format(pattern)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pattern']] # depends on [control=['if'], data=[]]
else:
result_files_patterns = ['.']
output_dir = options.output_directory
if os.path.exists(output_dir) and (not os.path.isdir(output_dir)):
parser.error("Output directory '{}' must not refer to an existing file.".format(output_dir)) # depends on [control=['if'], data=[]]
return {'output_dir': output_dir, 'result_files_patterns': result_files_patterns} |
def get_marshaller_for_type(self, tp):
""" Gets the appropriate marshaller for a type.
Retrieves the marshaller, if any, that can be used to read/write
a Python object with type 'tp'. The modules it requires, if
available, will be loaded.
Parameters
----------
tp : type or str
Python object ``type`` (which would be the class reference)
or its string representation like ``'collections.deque'``.
Returns
-------
marshaller : marshaller or None
The marshaller that can read/write the type to
file. ``None`` if no appropriate marshaller is found.
has_required_modules : bool
Whether the required modules for reading the type are
present or not.
See Also
--------
hdf5storage.Marshallers.TypeMarshaller.types
"""
if not isinstance(tp, str):
tp = tp.__module__ + '.' + tp.__name__
if tp in self._types:
index = self._types[tp]
else:
return None, False
m = self._marshallers[index]
if self._imported_required_modules[index]:
return m, True
if not self._has_required_modules[index]:
return m, False
success = self._import_marshaller_modules(m)
self._has_required_modules[index] = success
self._imported_required_modules[index] = success
return m, success | def function[get_marshaller_for_type, parameter[self, tp]]:
constant[ Gets the appropriate marshaller for a type.
Retrieves the marshaller, if any, that can be used to read/write
a Python object with type 'tp'. The modules it requires, if
available, will be loaded.
Parameters
----------
tp : type or str
Python object ``type`` (which would be the class reference)
or its string representation like ``'collections.deque'``.
Returns
-------
marshaller : marshaller or None
The marshaller that can read/write the type to
file. ``None`` if no appropriate marshaller is found.
has_required_modules : bool
Whether the required modules for reading the type are
present or not.
See Also
--------
hdf5storage.Marshallers.TypeMarshaller.types
]
if <ast.UnaryOp object at 0x7da1b2872320> begin[:]
variable[tp] assign[=] binary_operation[binary_operation[name[tp].__module__ + constant[.]] + name[tp].__name__]
if compare[name[tp] in name[self]._types] begin[:]
variable[index] assign[=] call[name[self]._types][name[tp]]
variable[m] assign[=] call[name[self]._marshallers][name[index]]
if call[name[self]._imported_required_modules][name[index]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b2872a40>, <ast.Constant object at 0x7da1b2872c20>]]]
if <ast.UnaryOp object at 0x7da1b28726b0> begin[:]
return[tuple[[<ast.Name object at 0x7da1b28709a0>, <ast.Constant object at 0x7da1b2872680>]]]
variable[success] assign[=] call[name[self]._import_marshaller_modules, parameter[name[m]]]
call[name[self]._has_required_modules][name[index]] assign[=] name[success]
call[name[self]._imported_required_modules][name[index]] assign[=] name[success]
return[tuple[[<ast.Name object at 0x7da1b2873b80>, <ast.Name object at 0x7da1b2871f60>]]] | keyword[def] identifier[get_marshaller_for_type] ( identifier[self] , identifier[tp] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[tp] , identifier[str] ):
identifier[tp] = identifier[tp] . identifier[__module__] + literal[string] + identifier[tp] . identifier[__name__]
keyword[if] identifier[tp] keyword[in] identifier[self] . identifier[_types] :
identifier[index] = identifier[self] . identifier[_types] [ identifier[tp] ]
keyword[else] :
keyword[return] keyword[None] , keyword[False]
identifier[m] = identifier[self] . identifier[_marshallers] [ identifier[index] ]
keyword[if] identifier[self] . identifier[_imported_required_modules] [ identifier[index] ]:
keyword[return] identifier[m] , keyword[True]
keyword[if] keyword[not] identifier[self] . identifier[_has_required_modules] [ identifier[index] ]:
keyword[return] identifier[m] , keyword[False]
identifier[success] = identifier[self] . identifier[_import_marshaller_modules] ( identifier[m] )
identifier[self] . identifier[_has_required_modules] [ identifier[index] ]= identifier[success]
identifier[self] . identifier[_imported_required_modules] [ identifier[index] ]= identifier[success]
keyword[return] identifier[m] , identifier[success] | def get_marshaller_for_type(self, tp):
""" Gets the appropriate marshaller for a type.
Retrieves the marshaller, if any, that can be used to read/write
a Python object with type 'tp'. The modules it requires, if
available, will be loaded.
Parameters
----------
tp : type or str
Python object ``type`` (which would be the class reference)
or its string representation like ``'collections.deque'``.
Returns
-------
marshaller : marshaller or None
The marshaller that can read/write the type to
file. ``None`` if no appropriate marshaller is found.
has_required_modules : bool
Whether the required modules for reading the type are
present or not.
See Also
--------
hdf5storage.Marshallers.TypeMarshaller.types
"""
if not isinstance(tp, str):
tp = tp.__module__ + '.' + tp.__name__ # depends on [control=['if'], data=[]]
if tp in self._types:
index = self._types[tp] # depends on [control=['if'], data=['tp']]
else:
return (None, False)
m = self._marshallers[index]
if self._imported_required_modules[index]:
return (m, True) # depends on [control=['if'], data=[]]
if not self._has_required_modules[index]:
return (m, False) # depends on [control=['if'], data=[]]
success = self._import_marshaller_modules(m)
self._has_required_modules[index] = success
self._imported_required_modules[index] = success
return (m, success) |
def benchmark_mitdb(detector, verbose=False, print_results=False):
"""
Benchmark a qrs detector against mitdb's records.
Parameters
----------
detector : function
The detector function.
verbose : bool, optional
The verbose option of the detector function.
print_results : bool, optional
Whether to print the overall performance, and the results for
each record.
Returns
-------
comparitors : dictionary
Dictionary of Comparitor objects run on the records, keyed on
the record names.
specificity : float
Aggregate specificity.
positive_predictivity : float
Aggregate positive_predictivity.
false_positive_rate : float
Aggregate false_positive_rate.
Notes
-----
TODO:
- remove non-qrs detections from reference annotations
- allow kwargs
Examples
--------
>>> import wfdb
>> from wfdb.processing import benchmark_mitdb, xqrs_detect
>>> comparitors, spec, pp, fpr = benchmark_mitdb(xqrs_detect)
"""
record_list = get_record_list('mitdb')
n_records = len(record_list)
# Function arguments for starmap
args = zip(record_list, n_records * [detector], n_records * [verbose])
# Run detector and compare against reference annotations for all
# records
with Pool(cpu_count() - 1) as p:
comparitors = p.starmap(benchmark_mitdb_record, args)
# Calculate aggregate stats
specificity = np.mean([c.specificity for c in comparitors])
positive_predictivity = np.mean(
[c.positive_predictivity for c in comparitors])
false_positive_rate = np.mean(
[c.false_positive_rate for c in comparitors])
comparitors = dict(zip(record_list, comparitors))
print('Benchmark complete')
if print_results:
print('\nOverall MITDB Performance - Specificity: %.4f, Positive Predictivity: %.4f, False Positive Rate: %.4f\n'
% (specificity, positive_predictivity, false_positive_rate))
for record_name in record_list:
print('Record %s:' % record_name)
comparitors[record_name].print_summary()
print('\n\n')
return comparitors, specificity, positive_predictivity, false_positive_rate | def function[benchmark_mitdb, parameter[detector, verbose, print_results]]:
constant[
Benchmark a qrs detector against mitdb's records.
Parameters
----------
detector : function
The detector function.
verbose : bool, optional
The verbose option of the detector function.
print_results : bool, optional
Whether to print the overall performance, and the results for
each record.
Returns
-------
comparitors : dictionary
Dictionary of Comparitor objects run on the records, keyed on
the record names.
specificity : float
Aggregate specificity.
positive_predictivity : float
Aggregate positive_predictivity.
false_positive_rate : float
Aggregate false_positive_rate.
Notes
-----
TODO:
- remove non-qrs detections from reference annotations
- allow kwargs
Examples
--------
>>> import wfdb
>> from wfdb.processing import benchmark_mitdb, xqrs_detect
>>> comparitors, spec, pp, fpr = benchmark_mitdb(xqrs_detect)
]
variable[record_list] assign[=] call[name[get_record_list], parameter[constant[mitdb]]]
variable[n_records] assign[=] call[name[len], parameter[name[record_list]]]
variable[args] assign[=] call[name[zip], parameter[name[record_list], binary_operation[name[n_records] * list[[<ast.Name object at 0x7da1b19ed990>]]], binary_operation[name[n_records] * list[[<ast.Name object at 0x7da1b19ef730>]]]]]
with call[name[Pool], parameter[binary_operation[call[name[cpu_count], parameter[]] - constant[1]]]] begin[:]
variable[comparitors] assign[=] call[name[p].starmap, parameter[name[benchmark_mitdb_record], name[args]]]
variable[specificity] assign[=] call[name[np].mean, parameter[<ast.ListComp object at 0x7da1b19ef940>]]
variable[positive_predictivity] assign[=] call[name[np].mean, parameter[<ast.ListComp object at 0x7da1b19ec5b0>]]
variable[false_positive_rate] assign[=] call[name[np].mean, parameter[<ast.ListComp object at 0x7da1b19ef700>]]
variable[comparitors] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[record_list], name[comparitors]]]]]
call[name[print], parameter[constant[Benchmark complete]]]
if name[print_results] begin[:]
call[name[print], parameter[binary_operation[constant[
Overall MITDB Performance - Specificity: %.4f, Positive Predictivity: %.4f, False Positive Rate: %.4f
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b19ed060>, <ast.Name object at 0x7da1b19eea10>, <ast.Name object at 0x7da1b19ec370>]]]]]
for taget[name[record_name]] in starred[name[record_list]] begin[:]
call[name[print], parameter[binary_operation[constant[Record %s:] <ast.Mod object at 0x7da2590d6920> name[record_name]]]]
call[call[name[comparitors]][name[record_name]].print_summary, parameter[]]
call[name[print], parameter[constant[
]]]
return[tuple[[<ast.Name object at 0x7da1b19ed150>, <ast.Name object at 0x7da1b19ee9e0>, <ast.Name object at 0x7da1b19efaf0>, <ast.Name object at 0x7da1b19ec250>]]] | keyword[def] identifier[benchmark_mitdb] ( identifier[detector] , identifier[verbose] = keyword[False] , identifier[print_results] = keyword[False] ):
literal[string]
identifier[record_list] = identifier[get_record_list] ( literal[string] )
identifier[n_records] = identifier[len] ( identifier[record_list] )
identifier[args] = identifier[zip] ( identifier[record_list] , identifier[n_records] *[ identifier[detector] ], identifier[n_records] *[ identifier[verbose] ])
keyword[with] identifier[Pool] ( identifier[cpu_count] ()- literal[int] ) keyword[as] identifier[p] :
identifier[comparitors] = identifier[p] . identifier[starmap] ( identifier[benchmark_mitdb_record] , identifier[args] )
identifier[specificity] = identifier[np] . identifier[mean] ([ identifier[c] . identifier[specificity] keyword[for] identifier[c] keyword[in] identifier[comparitors] ])
identifier[positive_predictivity] = identifier[np] . identifier[mean] (
[ identifier[c] . identifier[positive_predictivity] keyword[for] identifier[c] keyword[in] identifier[comparitors] ])
identifier[false_positive_rate] = identifier[np] . identifier[mean] (
[ identifier[c] . identifier[false_positive_rate] keyword[for] identifier[c] keyword[in] identifier[comparitors] ])
identifier[comparitors] = identifier[dict] ( identifier[zip] ( identifier[record_list] , identifier[comparitors] ))
identifier[print] ( literal[string] )
keyword[if] identifier[print_results] :
identifier[print] ( literal[string]
%( identifier[specificity] , identifier[positive_predictivity] , identifier[false_positive_rate] ))
keyword[for] identifier[record_name] keyword[in] identifier[record_list] :
identifier[print] ( literal[string] % identifier[record_name] )
identifier[comparitors] [ identifier[record_name] ]. identifier[print_summary] ()
identifier[print] ( literal[string] )
keyword[return] identifier[comparitors] , identifier[specificity] , identifier[positive_predictivity] , identifier[false_positive_rate] | def benchmark_mitdb(detector, verbose=False, print_results=False):
"""
Benchmark a qrs detector against mitdb's records.
Parameters
----------
detector : function
The detector function.
verbose : bool, optional
The verbose option of the detector function.
print_results : bool, optional
Whether to print the overall performance, and the results for
each record.
Returns
-------
comparitors : dictionary
Dictionary of Comparitor objects run on the records, keyed on
the record names.
specificity : float
Aggregate specificity.
positive_predictivity : float
Aggregate positive_predictivity.
false_positive_rate : float
Aggregate false_positive_rate.
Notes
-----
TODO:
- remove non-qrs detections from reference annotations
- allow kwargs
Examples
--------
>>> import wfdb
>> from wfdb.processing import benchmark_mitdb, xqrs_detect
>>> comparitors, spec, pp, fpr = benchmark_mitdb(xqrs_detect)
"""
record_list = get_record_list('mitdb')
n_records = len(record_list)
# Function arguments for starmap
args = zip(record_list, n_records * [detector], n_records * [verbose])
# Run detector and compare against reference annotations for all
# records
with Pool(cpu_count() - 1) as p:
comparitors = p.starmap(benchmark_mitdb_record, args) # depends on [control=['with'], data=['p']]
# Calculate aggregate stats
specificity = np.mean([c.specificity for c in comparitors])
positive_predictivity = np.mean([c.positive_predictivity for c in comparitors])
false_positive_rate = np.mean([c.false_positive_rate for c in comparitors])
comparitors = dict(zip(record_list, comparitors))
print('Benchmark complete')
if print_results:
print('\nOverall MITDB Performance - Specificity: %.4f, Positive Predictivity: %.4f, False Positive Rate: %.4f\n' % (specificity, positive_predictivity, false_positive_rate))
for record_name in record_list:
print('Record %s:' % record_name)
comparitors[record_name].print_summary()
print('\n\n') # depends on [control=['for'], data=['record_name']] # depends on [control=['if'], data=[]]
return (comparitors, specificity, positive_predictivity, false_positive_rate) |
def wrapping(self):
""" Texture wrapping mode """
value = self._wrapping
return value[0] if all([v == value[0] for v in value]) else value | def function[wrapping, parameter[self]]:
constant[ Texture wrapping mode ]
variable[value] assign[=] name[self]._wrapping
return[<ast.IfExp object at 0x7da1b0ebc0d0>] | keyword[def] identifier[wrapping] ( identifier[self] ):
literal[string]
identifier[value] = identifier[self] . identifier[_wrapping]
keyword[return] identifier[value] [ literal[int] ] keyword[if] identifier[all] ([ identifier[v] == identifier[value] [ literal[int] ] keyword[for] identifier[v] keyword[in] identifier[value] ]) keyword[else] identifier[value] | def wrapping(self):
""" Texture wrapping mode """
value = self._wrapping
return value[0] if all([v == value[0] for v in value]) else value |
def on_timer(self, event):
'''Main Loop.'''
state = self.state
self.loopStartTime = time.time()
if state.close_event.wait(0.001):
self.timer.Stop()
self.Destroy()
return
# Check for resizing
self.checkReszie()
if self.resized:
self.on_idle(0)
# Get attitude information
while state.child_pipe_recv.poll():
objList = state.child_pipe_recv.recv()
for obj in objList:
self.calcFontScaling()
if isinstance(obj,Attitude):
self.oldRoll = self.roll
self.pitch = obj.pitch*180/math.pi
self.roll = obj.roll*180/math.pi
self.yaw = obj.yaw*180/math.pi
# Update Roll, Pitch, Yaw Text Text
self.updateRPYText()
# Recalculate Horizon Polygons
self.calcHorizonPoints()
# Update Pitch Markers
self.adjustPitchmarkers()
elif isinstance(obj,VFR_HUD):
self.heading = obj.heading
self.airspeed = obj.airspeed
self.climbRate = obj.climbRate
# Update Airpseed, Altitude, Climb Rate Locations
self.updateAARText()
# Update Heading North Pointer
self.adjustHeadingPointer()
self.adjustNorthPointer()
elif isinstance(obj,Global_Position_INT):
self.relAlt = obj.relAlt
self.relAltTime = obj.curTime
# Update Airpseed, Altitude, Climb Rate Locations
self.updateAARText()
# Update Altitude History
self.updateAltHistory()
elif isinstance(obj,BatteryInfo):
self.voltage = obj.voltage
self.current = obj.current
self.batRemain = obj.batRemain
# Update Battery Bar
self.updateBatteryBar()
elif isinstance(obj,FlightState):
self.mode = obj.mode
self.armed = obj.armState
# Update Mode and Arm State Text
self.updateStateText()
elif isinstance(obj,WaypointInfo):
self.currentWP = obj.current
self.finalWP = obj.final
self.wpDist = obj.currentDist
self.nextWPTime = obj.nextWPTime
if obj.wpBearing < 0.0:
self.wpBearing = obj.wpBearing + 360
else:
self.wpBearing = obj.wpBearing
# Update waypoint text
self.updateWPText()
# Adjust Waypoint Pointer
self.adjustWPPointer()
elif isinstance(obj, FPS):
# Update fps target
self.fps = obj.fps
# Quit Drawing if too early
if (time.time() > self.nextTime):
# Update Matplotlib Plot
self.canvas.draw()
self.canvas.Refresh()
self.Refresh()
self.Update()
# Calculate next frame time
if (self.fps > 0):
fpsTime = 1/self.fps
self.nextTime = fpsTime + self.loopStartTime
else:
self.nextTime = time.time() | def function[on_timer, parameter[self, event]]:
constant[Main Loop.]
variable[state] assign[=] name[self].state
name[self].loopStartTime assign[=] call[name[time].time, parameter[]]
if call[name[state].close_event.wait, parameter[constant[0.001]]] begin[:]
call[name[self].timer.Stop, parameter[]]
call[name[self].Destroy, parameter[]]
return[None]
call[name[self].checkReszie, parameter[]]
if name[self].resized begin[:]
call[name[self].on_idle, parameter[constant[0]]]
while call[name[state].child_pipe_recv.poll, parameter[]] begin[:]
variable[objList] assign[=] call[name[state].child_pipe_recv.recv, parameter[]]
for taget[name[obj]] in starred[name[objList]] begin[:]
call[name[self].calcFontScaling, parameter[]]
if call[name[isinstance], parameter[name[obj], name[Attitude]]] begin[:]
name[self].oldRoll assign[=] name[self].roll
name[self].pitch assign[=] binary_operation[binary_operation[name[obj].pitch * constant[180]] / name[math].pi]
name[self].roll assign[=] binary_operation[binary_operation[name[obj].roll * constant[180]] / name[math].pi]
name[self].yaw assign[=] binary_operation[binary_operation[name[obj].yaw * constant[180]] / name[math].pi]
call[name[self].updateRPYText, parameter[]]
call[name[self].calcHorizonPoints, parameter[]]
call[name[self].adjustPitchmarkers, parameter[]]
if compare[call[name[time].time, parameter[]] greater[>] name[self].nextTime] begin[:]
call[name[self].canvas.draw, parameter[]]
call[name[self].canvas.Refresh, parameter[]]
call[name[self].Refresh, parameter[]]
call[name[self].Update, parameter[]]
if compare[name[self].fps greater[>] constant[0]] begin[:]
variable[fpsTime] assign[=] binary_operation[constant[1] / name[self].fps]
name[self].nextTime assign[=] binary_operation[name[fpsTime] + name[self].loopStartTime] | keyword[def] identifier[on_timer] ( identifier[self] , identifier[event] ):
literal[string]
identifier[state] = identifier[self] . identifier[state]
identifier[self] . identifier[loopStartTime] = identifier[time] . identifier[time] ()
keyword[if] identifier[state] . identifier[close_event] . identifier[wait] ( literal[int] ):
identifier[self] . identifier[timer] . identifier[Stop] ()
identifier[self] . identifier[Destroy] ()
keyword[return]
identifier[self] . identifier[checkReszie] ()
keyword[if] identifier[self] . identifier[resized] :
identifier[self] . identifier[on_idle] ( literal[int] )
keyword[while] identifier[state] . identifier[child_pipe_recv] . identifier[poll] ():
identifier[objList] = identifier[state] . identifier[child_pipe_recv] . identifier[recv] ()
keyword[for] identifier[obj] keyword[in] identifier[objList] :
identifier[self] . identifier[calcFontScaling] ()
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[Attitude] ):
identifier[self] . identifier[oldRoll] = identifier[self] . identifier[roll]
identifier[self] . identifier[pitch] = identifier[obj] . identifier[pitch] * literal[int] / identifier[math] . identifier[pi]
identifier[self] . identifier[roll] = identifier[obj] . identifier[roll] * literal[int] / identifier[math] . identifier[pi]
identifier[self] . identifier[yaw] = identifier[obj] . identifier[yaw] * literal[int] / identifier[math] . identifier[pi]
identifier[self] . identifier[updateRPYText] ()
identifier[self] . identifier[calcHorizonPoints] ()
identifier[self] . identifier[adjustPitchmarkers] ()
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[VFR_HUD] ):
identifier[self] . identifier[heading] = identifier[obj] . identifier[heading]
identifier[self] . identifier[airspeed] = identifier[obj] . identifier[airspeed]
identifier[self] . identifier[climbRate] = identifier[obj] . identifier[climbRate]
identifier[self] . identifier[updateAARText] ()
identifier[self] . identifier[adjustHeadingPointer] ()
identifier[self] . identifier[adjustNorthPointer] ()
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[Global_Position_INT] ):
identifier[self] . identifier[relAlt] = identifier[obj] . identifier[relAlt]
identifier[self] . identifier[relAltTime] = identifier[obj] . identifier[curTime]
identifier[self] . identifier[updateAARText] ()
identifier[self] . identifier[updateAltHistory] ()
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[BatteryInfo] ):
identifier[self] . identifier[voltage] = identifier[obj] . identifier[voltage]
identifier[self] . identifier[current] = identifier[obj] . identifier[current]
identifier[self] . identifier[batRemain] = identifier[obj] . identifier[batRemain]
identifier[self] . identifier[updateBatteryBar] ()
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[FlightState] ):
identifier[self] . identifier[mode] = identifier[obj] . identifier[mode]
identifier[self] . identifier[armed] = identifier[obj] . identifier[armState]
identifier[self] . identifier[updateStateText] ()
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[WaypointInfo] ):
identifier[self] . identifier[currentWP] = identifier[obj] . identifier[current]
identifier[self] . identifier[finalWP] = identifier[obj] . identifier[final]
identifier[self] . identifier[wpDist] = identifier[obj] . identifier[currentDist]
identifier[self] . identifier[nextWPTime] = identifier[obj] . identifier[nextWPTime]
keyword[if] identifier[obj] . identifier[wpBearing] < literal[int] :
identifier[self] . identifier[wpBearing] = identifier[obj] . identifier[wpBearing] + literal[int]
keyword[else] :
identifier[self] . identifier[wpBearing] = identifier[obj] . identifier[wpBearing]
identifier[self] . identifier[updateWPText] ()
identifier[self] . identifier[adjustWPPointer] ()
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[FPS] ):
identifier[self] . identifier[fps] = identifier[obj] . identifier[fps]
keyword[if] ( identifier[time] . identifier[time] ()> identifier[self] . identifier[nextTime] ):
identifier[self] . identifier[canvas] . identifier[draw] ()
identifier[self] . identifier[canvas] . identifier[Refresh] ()
identifier[self] . identifier[Refresh] ()
identifier[self] . identifier[Update] ()
keyword[if] ( identifier[self] . identifier[fps] > literal[int] ):
identifier[fpsTime] = literal[int] / identifier[self] . identifier[fps]
identifier[self] . identifier[nextTime] = identifier[fpsTime] + identifier[self] . identifier[loopStartTime]
keyword[else] :
identifier[self] . identifier[nextTime] = identifier[time] . identifier[time] () | def on_timer(self, event):
"""Main Loop."""
state = self.state
self.loopStartTime = time.time()
if state.close_event.wait(0.001):
self.timer.Stop()
self.Destroy()
return # depends on [control=['if'], data=[]]
# Check for resizing
self.checkReszie()
if self.resized:
self.on_idle(0) # depends on [control=['if'], data=[]]
# Get attitude information
while state.child_pipe_recv.poll():
objList = state.child_pipe_recv.recv()
for obj in objList:
self.calcFontScaling()
if isinstance(obj, Attitude):
self.oldRoll = self.roll
self.pitch = obj.pitch * 180 / math.pi
self.roll = obj.roll * 180 / math.pi
self.yaw = obj.yaw * 180 / math.pi
# Update Roll, Pitch, Yaw Text Text
self.updateRPYText()
# Recalculate Horizon Polygons
self.calcHorizonPoints()
# Update Pitch Markers
self.adjustPitchmarkers() # depends on [control=['if'], data=[]]
elif isinstance(obj, VFR_HUD):
self.heading = obj.heading
self.airspeed = obj.airspeed
self.climbRate = obj.climbRate
# Update Airpseed, Altitude, Climb Rate Locations
self.updateAARText()
# Update Heading North Pointer
self.adjustHeadingPointer()
self.adjustNorthPointer() # depends on [control=['if'], data=[]]
elif isinstance(obj, Global_Position_INT):
self.relAlt = obj.relAlt
self.relAltTime = obj.curTime
# Update Airpseed, Altitude, Climb Rate Locations
self.updateAARText()
# Update Altitude History
self.updateAltHistory() # depends on [control=['if'], data=[]]
elif isinstance(obj, BatteryInfo):
self.voltage = obj.voltage
self.current = obj.current
self.batRemain = obj.batRemain
# Update Battery Bar
self.updateBatteryBar() # depends on [control=['if'], data=[]]
elif isinstance(obj, FlightState):
self.mode = obj.mode
self.armed = obj.armState
# Update Mode and Arm State Text
self.updateStateText() # depends on [control=['if'], data=[]]
elif isinstance(obj, WaypointInfo):
self.currentWP = obj.current
self.finalWP = obj.final
self.wpDist = obj.currentDist
self.nextWPTime = obj.nextWPTime
if obj.wpBearing < 0.0:
self.wpBearing = obj.wpBearing + 360 # depends on [control=['if'], data=[]]
else:
self.wpBearing = obj.wpBearing
# Update waypoint text
self.updateWPText()
# Adjust Waypoint Pointer
self.adjustWPPointer() # depends on [control=['if'], data=[]]
elif isinstance(obj, FPS):
# Update fps target
self.fps = obj.fps # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['obj']] # depends on [control=['while'], data=[]]
# Quit Drawing if too early
if time.time() > self.nextTime:
# Update Matplotlib Plot
self.canvas.draw()
self.canvas.Refresh()
self.Refresh()
self.Update()
# Calculate next frame time
if self.fps > 0:
fpsTime = 1 / self.fps
self.nextTime = fpsTime + self.loopStartTime # depends on [control=['if'], data=[]]
else:
self.nextTime = time.time() # depends on [control=['if'], data=[]] |
def add_to_island_expectations_dict(average_window_readcount,
current_max_scaled_score,
island_eligibility_threshold,
island_expectations, gap_contribution):
# type: ( float, int, float, Dict[int, float], float) -> Dict[int, float]
"""Can probably be heavily optimized.
Time required to run can be seen from logging info."""
scaled_score = current_max_scaled_score + E_VALUE
for index in range(current_max_scaled_score + 1, scaled_score + 1):
island_expectation = 0.0
i = island_eligibility_threshold #i is the number of tags in the added window
current_island = int(round(index - compute_window_score(
i, average_window_readcount) / BIN_SIZE))
while (current_island >= 0):
if current_island in island_expectations:
island_expectation += _poisson(
i, average_window_readcount) * island_expectations[
current_island]
i += 1
current_island = int(round(index - compute_window_score(
i, average_window_readcount) / BIN_SIZE))
island_expectation *= gap_contribution
if island_expectation:
island_expectations[index] = island_expectation
return island_expectations | def function[add_to_island_expectations_dict, parameter[average_window_readcount, current_max_scaled_score, island_eligibility_threshold, island_expectations, gap_contribution]]:
constant[Can probably be heavily optimized.
Time required to run can be seen from logging info.]
variable[scaled_score] assign[=] binary_operation[name[current_max_scaled_score] + name[E_VALUE]]
for taget[name[index]] in starred[call[name[range], parameter[binary_operation[name[current_max_scaled_score] + constant[1]], binary_operation[name[scaled_score] + constant[1]]]]] begin[:]
variable[island_expectation] assign[=] constant[0.0]
variable[i] assign[=] name[island_eligibility_threshold]
variable[current_island] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[name[index] - binary_operation[call[name[compute_window_score], parameter[name[i], name[average_window_readcount]]] / name[BIN_SIZE]]]]]]]
while compare[name[current_island] greater_or_equal[>=] constant[0]] begin[:]
if compare[name[current_island] in name[island_expectations]] begin[:]
<ast.AugAssign object at 0x7da20e9b0d30>
<ast.AugAssign object at 0x7da20e9b02e0>
variable[current_island] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[name[index] - binary_operation[call[name[compute_window_score], parameter[name[i], name[average_window_readcount]]] / name[BIN_SIZE]]]]]]]
<ast.AugAssign object at 0x7da20e9b0f10>
if name[island_expectation] begin[:]
call[name[island_expectations]][name[index]] assign[=] name[island_expectation]
return[name[island_expectations]] | keyword[def] identifier[add_to_island_expectations_dict] ( identifier[average_window_readcount] ,
identifier[current_max_scaled_score] ,
identifier[island_eligibility_threshold] ,
identifier[island_expectations] , identifier[gap_contribution] ):
literal[string]
identifier[scaled_score] = identifier[current_max_scaled_score] + identifier[E_VALUE]
keyword[for] identifier[index] keyword[in] identifier[range] ( identifier[current_max_scaled_score] + literal[int] , identifier[scaled_score] + literal[int] ):
identifier[island_expectation] = literal[int]
identifier[i] = identifier[island_eligibility_threshold]
identifier[current_island] = identifier[int] ( identifier[round] ( identifier[index] - identifier[compute_window_score] (
identifier[i] , identifier[average_window_readcount] )/ identifier[BIN_SIZE] ))
keyword[while] ( identifier[current_island] >= literal[int] ):
keyword[if] identifier[current_island] keyword[in] identifier[island_expectations] :
identifier[island_expectation] += identifier[_poisson] (
identifier[i] , identifier[average_window_readcount] )* identifier[island_expectations] [
identifier[current_island] ]
identifier[i] += literal[int]
identifier[current_island] = identifier[int] ( identifier[round] ( identifier[index] - identifier[compute_window_score] (
identifier[i] , identifier[average_window_readcount] )/ identifier[BIN_SIZE] ))
identifier[island_expectation] *= identifier[gap_contribution]
keyword[if] identifier[island_expectation] :
identifier[island_expectations] [ identifier[index] ]= identifier[island_expectation]
keyword[return] identifier[island_expectations] | def add_to_island_expectations_dict(average_window_readcount, current_max_scaled_score, island_eligibility_threshold, island_expectations, gap_contribution):
# type: ( float, int, float, Dict[int, float], float) -> Dict[int, float]
'Can probably be heavily optimized.\n Time required to run can be seen from logging info.'
scaled_score = current_max_scaled_score + E_VALUE
for index in range(current_max_scaled_score + 1, scaled_score + 1):
island_expectation = 0.0
i = island_eligibility_threshold #i is the number of tags in the added window
current_island = int(round(index - compute_window_score(i, average_window_readcount) / BIN_SIZE))
while current_island >= 0:
if current_island in island_expectations:
island_expectation += _poisson(i, average_window_readcount) * island_expectations[current_island] # depends on [control=['if'], data=['current_island', 'island_expectations']]
i += 1
current_island = int(round(index - compute_window_score(i, average_window_readcount) / BIN_SIZE)) # depends on [control=['while'], data=['current_island']]
island_expectation *= gap_contribution
if island_expectation:
island_expectations[index] = island_expectation # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index']]
return island_expectations |
def atlas_get_zonefile_inventory( offset=None, length=None ):
"""
Get the in-RAM zonefile inventory vector.
offset and length are in *bytes*
"""
global ZONEFILE_INV, ZONEFILE_INV_LOCK
with ZONEFILE_INV_LOCK:
try:
assert ZONEFILE_INV is not None
except AssertionError:
log.error("FATAL: zonefile inventory not loaded")
os.abort()
if offset is None:
offset = 0
if length is None:
length = len(ZONEFILE_INV) - offset
if offset >= len(ZONEFILE_INV):
return ""
if offset + length > len(ZONEFILE_INV):
length = len(ZONEFILE_INV) - offset
ret = ZONEFILE_INV[offset:offset+length]
return ret | def function[atlas_get_zonefile_inventory, parameter[offset, length]]:
constant[
Get the in-RAM zonefile inventory vector.
offset and length are in *bytes*
]
<ast.Global object at 0x7da20e962920>
with name[ZONEFILE_INV_LOCK] begin[:]
<ast.Try object at 0x7da20e960190>
if compare[name[offset] is constant[None]] begin[:]
variable[offset] assign[=] constant[0]
if compare[name[length] is constant[None]] begin[:]
variable[length] assign[=] binary_operation[call[name[len], parameter[name[ZONEFILE_INV]]] - name[offset]]
if compare[name[offset] greater_or_equal[>=] call[name[len], parameter[name[ZONEFILE_INV]]]] begin[:]
return[constant[]]
if compare[binary_operation[name[offset] + name[length]] greater[>] call[name[len], parameter[name[ZONEFILE_INV]]]] begin[:]
variable[length] assign[=] binary_operation[call[name[len], parameter[name[ZONEFILE_INV]]] - name[offset]]
variable[ret] assign[=] call[name[ZONEFILE_INV]][<ast.Slice object at 0x7da20e963160>]
return[name[ret]] | keyword[def] identifier[atlas_get_zonefile_inventory] ( identifier[offset] = keyword[None] , identifier[length] = keyword[None] ):
literal[string]
keyword[global] identifier[ZONEFILE_INV] , identifier[ZONEFILE_INV_LOCK]
keyword[with] identifier[ZONEFILE_INV_LOCK] :
keyword[try] :
keyword[assert] identifier[ZONEFILE_INV] keyword[is] keyword[not] keyword[None]
keyword[except] identifier[AssertionError] :
identifier[log] . identifier[error] ( literal[string] )
identifier[os] . identifier[abort] ()
keyword[if] identifier[offset] keyword[is] keyword[None] :
identifier[offset] = literal[int]
keyword[if] identifier[length] keyword[is] keyword[None] :
identifier[length] = identifier[len] ( identifier[ZONEFILE_INV] )- identifier[offset]
keyword[if] identifier[offset] >= identifier[len] ( identifier[ZONEFILE_INV] ):
keyword[return] literal[string]
keyword[if] identifier[offset] + identifier[length] > identifier[len] ( identifier[ZONEFILE_INV] ):
identifier[length] = identifier[len] ( identifier[ZONEFILE_INV] )- identifier[offset]
identifier[ret] = identifier[ZONEFILE_INV] [ identifier[offset] : identifier[offset] + identifier[length] ]
keyword[return] identifier[ret] | def atlas_get_zonefile_inventory(offset=None, length=None):
"""
Get the in-RAM zonefile inventory vector.
offset and length are in *bytes*
"""
global ZONEFILE_INV, ZONEFILE_INV_LOCK
with ZONEFILE_INV_LOCK:
try:
assert ZONEFILE_INV is not None # depends on [control=['try'], data=[]]
except AssertionError:
log.error('FATAL: zonefile inventory not loaded')
os.abort() # depends on [control=['except'], data=[]]
if offset is None:
offset = 0 # depends on [control=['if'], data=['offset']]
if length is None:
length = len(ZONEFILE_INV) - offset # depends on [control=['if'], data=['length']]
if offset >= len(ZONEFILE_INV):
return '' # depends on [control=['if'], data=[]]
if offset + length > len(ZONEFILE_INV):
length = len(ZONEFILE_INV) - offset # depends on [control=['if'], data=[]]
ret = ZONEFILE_INV[offset:offset + length]
return ret # depends on [control=['with'], data=[]] |
def _parseNetDirectory(self, rva, size, magic = consts.PE32):
"""
Parses the NET directory.
@see: U{http://www.ntcore.com/files/dotnetformat.htm}
@type rva: int
@param rva: The RVA where the NET directory starts.
@type size: int
@param size: The size of the NET directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{NETDirectory}
@return: A new L{NETDirectory} object.
"""
if not rva or not size:
return None
# create a NETDirectory class to hold the data
netDirectoryClass = directories.NETDirectory()
# parse the .NET Directory
netDir = directories.NetDirectory.parse(utils.ReadData(self.getDataAtRva(rva, size)))
netDirectoryClass.directory = netDir
# get the MetaData RVA and Size
mdhRva = netDir.metaData.rva.value
mdhSize = netDir.metaData.size.value
# read all the MetaData
rd = utils.ReadData(self.getDataAtRva(mdhRva, mdhSize))
# parse the MetaData headers
netDirectoryClass.netMetaDataHeader = directories.NetMetaDataHeader.parse(rd)
# parse the NET metadata streams
numberOfStreams = netDirectoryClass.netMetaDataHeader.numberOfStreams.value
netDirectoryClass.netMetaDataStreams = directories.NetMetaDataStreams.parse(rd, numberOfStreams)
for i in range(numberOfStreams):
stream = netDirectoryClass.netMetaDataStreams[i]
name = stream.name.value
rd.setOffset(stream.offset.value)
rd2 = utils.ReadData(rd.read(stream.size.value))
stream.info = []
if name == "#~" or i == 0:
stream.info = rd2
elif name == "#Strings" or i == 1:
while len(rd2) > 0:
offset = rd2.tell()
stream.info.append({ offset: rd2.readDotNetString() })
elif name == "#US" or i == 2:
while len(rd2) > 0:
offset = rd2.tell()
stream.info.append({ offset: rd2.readDotNetUnicodeString() })
elif name == "#GUID" or i == 3:
while len(rd2) > 0:
offset = rd2.tell()
stream.info.append({ offset: rd2.readDotNetGuid() })
elif name == "#Blob" or i == 4:
while len(rd2) > 0:
offset = rd2.tell()
stream.info.append({ offset: rd2.readDotNetBlob() })
for i in range(numberOfStreams):
stream = netDirectoryClass.netMetaDataStreams[i]
name = stream.name.value
if name == "#~" or i == 0:
stream.info = directories.NetMetaDataTables.parse(stream.info, netDirectoryClass.netMetaDataStreams)
# parse .NET resources
# get the Resources RVA and Size
resRva = netDir.resources.rva.value
resSize = netDir.resources.size.value
# read all the MetaData
rd = utils.ReadData(self.getDataAtRva(resRva, resSize))
resources = []
for i in netDirectoryClass.netMetaDataStreams[0].info.tables["ManifestResource"]:
offset = i["offset"]
rd.setOffset(offset)
size = rd.readDword()
data = rd.read(size)
if data[:4] == "\xce\xca\xef\xbe":
data = directories.NetResources.parse(utils.ReadData(data))
resources.append({ "name": i["name"], "offset": offset + 4, "size": size, "data": data })
netDirectoryClass.directory.resources.info = resources
return netDirectoryClass | def function[_parseNetDirectory, parameter[self, rva, size, magic]]:
constant[
Parses the NET directory.
@see: U{http://www.ntcore.com/files/dotnetformat.htm}
@type rva: int
@param rva: The RVA where the NET directory starts.
@type size: int
@param size: The size of the NET directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{NETDirectory}
@return: A new L{NETDirectory} object.
]
if <ast.BoolOp object at 0x7da20c76f880> begin[:]
return[constant[None]]
variable[netDirectoryClass] assign[=] call[name[directories].NETDirectory, parameter[]]
variable[netDir] assign[=] call[name[directories].NetDirectory.parse, parameter[call[name[utils].ReadData, parameter[call[name[self].getDataAtRva, parameter[name[rva], name[size]]]]]]]
name[netDirectoryClass].directory assign[=] name[netDir]
variable[mdhRva] assign[=] name[netDir].metaData.rva.value
variable[mdhSize] assign[=] name[netDir].metaData.size.value
variable[rd] assign[=] call[name[utils].ReadData, parameter[call[name[self].getDataAtRva, parameter[name[mdhRva], name[mdhSize]]]]]
name[netDirectoryClass].netMetaDataHeader assign[=] call[name[directories].NetMetaDataHeader.parse, parameter[name[rd]]]
variable[numberOfStreams] assign[=] name[netDirectoryClass].netMetaDataHeader.numberOfStreams.value
name[netDirectoryClass].netMetaDataStreams assign[=] call[name[directories].NetMetaDataStreams.parse, parameter[name[rd], name[numberOfStreams]]]
for taget[name[i]] in starred[call[name[range], parameter[name[numberOfStreams]]]] begin[:]
variable[stream] assign[=] call[name[netDirectoryClass].netMetaDataStreams][name[i]]
variable[name] assign[=] name[stream].name.value
call[name[rd].setOffset, parameter[name[stream].offset.value]]
variable[rd2] assign[=] call[name[utils].ReadData, parameter[call[name[rd].read, parameter[name[stream].size.value]]]]
name[stream].info assign[=] list[[]]
if <ast.BoolOp object at 0x7da20c76e9e0> begin[:]
name[stream].info assign[=] name[rd2]
for taget[name[i]] in starred[call[name[range], parameter[name[numberOfStreams]]]] begin[:]
variable[stream] assign[=] call[name[netDirectoryClass].netMetaDataStreams][name[i]]
variable[name] assign[=] name[stream].name.value
if <ast.BoolOp object at 0x7da20c76c8b0> begin[:]
name[stream].info assign[=] call[name[directories].NetMetaDataTables.parse, parameter[name[stream].info, name[netDirectoryClass].netMetaDataStreams]]
variable[resRva] assign[=] name[netDir].resources.rva.value
variable[resSize] assign[=] name[netDir].resources.size.value
variable[rd] assign[=] call[name[utils].ReadData, parameter[call[name[self].getDataAtRva, parameter[name[resRva], name[resSize]]]]]
variable[resources] assign[=] list[[]]
for taget[name[i]] in starred[call[call[name[netDirectoryClass].netMetaDataStreams][constant[0]].info.tables][constant[ManifestResource]]] begin[:]
variable[offset] assign[=] call[name[i]][constant[offset]]
call[name[rd].setOffset, parameter[name[offset]]]
variable[size] assign[=] call[name[rd].readDword, parameter[]]
variable[data] assign[=] call[name[rd].read, parameter[name[size]]]
if compare[call[name[data]][<ast.Slice object at 0x7da20c6c4f70>] equal[==] constant[ÎÊï¾]] begin[:]
variable[data] assign[=] call[name[directories].NetResources.parse, parameter[call[name[utils].ReadData, parameter[name[data]]]]]
call[name[resources].append, parameter[dictionary[[<ast.Constant object at 0x7da20c6c62c0>, <ast.Constant object at 0x7da20c6c7cd0>, <ast.Constant object at 0x7da20c6c7b80>, <ast.Constant object at 0x7da20c6c6110>], [<ast.Subscript object at 0x7da20c6c6980>, <ast.BinOp object at 0x7da20c6c5660>, <ast.Name object at 0x7da20c6c7e20>, <ast.Name object at 0x7da20c6c48b0>]]]]
name[netDirectoryClass].directory.resources.info assign[=] name[resources]
return[name[netDirectoryClass]] | keyword[def] identifier[_parseNetDirectory] ( identifier[self] , identifier[rva] , identifier[size] , identifier[magic] = identifier[consts] . identifier[PE32] ):
literal[string]
keyword[if] keyword[not] identifier[rva] keyword[or] keyword[not] identifier[size] :
keyword[return] keyword[None]
identifier[netDirectoryClass] = identifier[directories] . identifier[NETDirectory] ()
identifier[netDir] = identifier[directories] . identifier[NetDirectory] . identifier[parse] ( identifier[utils] . identifier[ReadData] ( identifier[self] . identifier[getDataAtRva] ( identifier[rva] , identifier[size] )))
identifier[netDirectoryClass] . identifier[directory] = identifier[netDir]
identifier[mdhRva] = identifier[netDir] . identifier[metaData] . identifier[rva] . identifier[value]
identifier[mdhSize] = identifier[netDir] . identifier[metaData] . identifier[size] . identifier[value]
identifier[rd] = identifier[utils] . identifier[ReadData] ( identifier[self] . identifier[getDataAtRva] ( identifier[mdhRva] , identifier[mdhSize] ))
identifier[netDirectoryClass] . identifier[netMetaDataHeader] = identifier[directories] . identifier[NetMetaDataHeader] . identifier[parse] ( identifier[rd] )
identifier[numberOfStreams] = identifier[netDirectoryClass] . identifier[netMetaDataHeader] . identifier[numberOfStreams] . identifier[value]
identifier[netDirectoryClass] . identifier[netMetaDataStreams] = identifier[directories] . identifier[NetMetaDataStreams] . identifier[parse] ( identifier[rd] , identifier[numberOfStreams] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[numberOfStreams] ):
identifier[stream] = identifier[netDirectoryClass] . identifier[netMetaDataStreams] [ identifier[i] ]
identifier[name] = identifier[stream] . identifier[name] . identifier[value]
identifier[rd] . identifier[setOffset] ( identifier[stream] . identifier[offset] . identifier[value] )
identifier[rd2] = identifier[utils] . identifier[ReadData] ( identifier[rd] . identifier[read] ( identifier[stream] . identifier[size] . identifier[value] ))
identifier[stream] . identifier[info] =[]
keyword[if] identifier[name] == literal[string] keyword[or] identifier[i] == literal[int] :
identifier[stream] . identifier[info] = identifier[rd2]
keyword[elif] identifier[name] == literal[string] keyword[or] identifier[i] == literal[int] :
keyword[while] identifier[len] ( identifier[rd2] )> literal[int] :
identifier[offset] = identifier[rd2] . identifier[tell] ()
identifier[stream] . identifier[info] . identifier[append] ({ identifier[offset] : identifier[rd2] . identifier[readDotNetString] ()})
keyword[elif] identifier[name] == literal[string] keyword[or] identifier[i] == literal[int] :
keyword[while] identifier[len] ( identifier[rd2] )> literal[int] :
identifier[offset] = identifier[rd2] . identifier[tell] ()
identifier[stream] . identifier[info] . identifier[append] ({ identifier[offset] : identifier[rd2] . identifier[readDotNetUnicodeString] ()})
keyword[elif] identifier[name] == literal[string] keyword[or] identifier[i] == literal[int] :
keyword[while] identifier[len] ( identifier[rd2] )> literal[int] :
identifier[offset] = identifier[rd2] . identifier[tell] ()
identifier[stream] . identifier[info] . identifier[append] ({ identifier[offset] : identifier[rd2] . identifier[readDotNetGuid] ()})
keyword[elif] identifier[name] == literal[string] keyword[or] identifier[i] == literal[int] :
keyword[while] identifier[len] ( identifier[rd2] )> literal[int] :
identifier[offset] = identifier[rd2] . identifier[tell] ()
identifier[stream] . identifier[info] . identifier[append] ({ identifier[offset] : identifier[rd2] . identifier[readDotNetBlob] ()})
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[numberOfStreams] ):
identifier[stream] = identifier[netDirectoryClass] . identifier[netMetaDataStreams] [ identifier[i] ]
identifier[name] = identifier[stream] . identifier[name] . identifier[value]
keyword[if] identifier[name] == literal[string] keyword[or] identifier[i] == literal[int] :
identifier[stream] . identifier[info] = identifier[directories] . identifier[NetMetaDataTables] . identifier[parse] ( identifier[stream] . identifier[info] , identifier[netDirectoryClass] . identifier[netMetaDataStreams] )
identifier[resRva] = identifier[netDir] . identifier[resources] . identifier[rva] . identifier[value]
identifier[resSize] = identifier[netDir] . identifier[resources] . identifier[size] . identifier[value]
identifier[rd] = identifier[utils] . identifier[ReadData] ( identifier[self] . identifier[getDataAtRva] ( identifier[resRva] , identifier[resSize] ))
identifier[resources] =[]
keyword[for] identifier[i] keyword[in] identifier[netDirectoryClass] . identifier[netMetaDataStreams] [ literal[int] ]. identifier[info] . identifier[tables] [ literal[string] ]:
identifier[offset] = identifier[i] [ literal[string] ]
identifier[rd] . identifier[setOffset] ( identifier[offset] )
identifier[size] = identifier[rd] . identifier[readDword] ()
identifier[data] = identifier[rd] . identifier[read] ( identifier[size] )
keyword[if] identifier[data] [: literal[int] ]== literal[string] :
identifier[data] = identifier[directories] . identifier[NetResources] . identifier[parse] ( identifier[utils] . identifier[ReadData] ( identifier[data] ))
identifier[resources] . identifier[append] ({ literal[string] : identifier[i] [ literal[string] ], literal[string] : identifier[offset] + literal[int] , literal[string] : identifier[size] , literal[string] : identifier[data] })
identifier[netDirectoryClass] . identifier[directory] . identifier[resources] . identifier[info] = identifier[resources]
keyword[return] identifier[netDirectoryClass] | def _parseNetDirectory(self, rva, size, magic=consts.PE32):
"""
Parses the NET directory.
@see: U{http://www.ntcore.com/files/dotnetformat.htm}
@type rva: int
@param rva: The RVA where the NET directory starts.
@type size: int
@param size: The size of the NET directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{NETDirectory}
@return: A new L{NETDirectory} object.
"""
if not rva or not size:
return None # depends on [control=['if'], data=[]]
# create a NETDirectory class to hold the data
netDirectoryClass = directories.NETDirectory()
# parse the .NET Directory
netDir = directories.NetDirectory.parse(utils.ReadData(self.getDataAtRva(rva, size)))
netDirectoryClass.directory = netDir
# get the MetaData RVA and Size
mdhRva = netDir.metaData.rva.value
mdhSize = netDir.metaData.size.value
# read all the MetaData
rd = utils.ReadData(self.getDataAtRva(mdhRva, mdhSize))
# parse the MetaData headers
netDirectoryClass.netMetaDataHeader = directories.NetMetaDataHeader.parse(rd)
# parse the NET metadata streams
numberOfStreams = netDirectoryClass.netMetaDataHeader.numberOfStreams.value
netDirectoryClass.netMetaDataStreams = directories.NetMetaDataStreams.parse(rd, numberOfStreams)
for i in range(numberOfStreams):
stream = netDirectoryClass.netMetaDataStreams[i]
name = stream.name.value
rd.setOffset(stream.offset.value)
rd2 = utils.ReadData(rd.read(stream.size.value))
stream.info = []
if name == '#~' or i == 0:
stream.info = rd2 # depends on [control=['if'], data=[]]
elif name == '#Strings' or i == 1:
while len(rd2) > 0:
offset = rd2.tell()
stream.info.append({offset: rd2.readDotNetString()}) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
elif name == '#US' or i == 2:
while len(rd2) > 0:
offset = rd2.tell()
stream.info.append({offset: rd2.readDotNetUnicodeString()}) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
elif name == '#GUID' or i == 3:
while len(rd2) > 0:
offset = rd2.tell()
stream.info.append({offset: rd2.readDotNetGuid()}) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
elif name == '#Blob' or i == 4:
while len(rd2) > 0:
offset = rd2.tell()
stream.info.append({offset: rd2.readDotNetBlob()}) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
for i in range(numberOfStreams):
stream = netDirectoryClass.netMetaDataStreams[i]
name = stream.name.value
if name == '#~' or i == 0:
stream.info = directories.NetMetaDataTables.parse(stream.info, netDirectoryClass.netMetaDataStreams) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# parse .NET resources
# get the Resources RVA and Size
resRva = netDir.resources.rva.value
resSize = netDir.resources.size.value
# read all the MetaData
rd = utils.ReadData(self.getDataAtRva(resRva, resSize))
resources = []
for i in netDirectoryClass.netMetaDataStreams[0].info.tables['ManifestResource']:
offset = i['offset']
rd.setOffset(offset)
size = rd.readDword()
data = rd.read(size)
if data[:4] == 'ÎÊï¾':
data = directories.NetResources.parse(utils.ReadData(data)) # depends on [control=['if'], data=[]]
resources.append({'name': i['name'], 'offset': offset + 4, 'size': size, 'data': data}) # depends on [control=['for'], data=['i']]
netDirectoryClass.directory.resources.info = resources
return netDirectoryClass |
def get_configs(self, name=None):
"""
Returns registred configurations.
* If ``name`` argument is not given, default behavior is to return
every config from all registred config;
* If ``name`` argument is given, just return its config and nothing
else;
Keyword Arguments:
name (string): Specific configuration name to return.
Raises:
NotRegisteredError: If given config name does not exist in
registry.
Returns:
dict: Configurations.
"""
if name:
if name not in self.registry:
msg = "Given config name '{}' is not registered."
raise NotRegisteredError(msg.format(name))
return {name: self.registry[name]}
return self.registry | def function[get_configs, parameter[self, name]]:
constant[
Returns registred configurations.
* If ``name`` argument is not given, default behavior is to return
every config from all registred config;
* If ``name`` argument is given, just return its config and nothing
else;
Keyword Arguments:
name (string): Specific configuration name to return.
Raises:
NotRegisteredError: If given config name does not exist in
registry.
Returns:
dict: Configurations.
]
if name[name] begin[:]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].registry] begin[:]
variable[msg] assign[=] constant[Given config name '{}' is not registered.]
<ast.Raise object at 0x7da20e74ada0>
return[dictionary[[<ast.Name object at 0x7da20e7497b0>], [<ast.Subscript object at 0x7da20e74be50>]]]
return[name[self].registry] | keyword[def] identifier[get_configs] ( identifier[self] , identifier[name] = keyword[None] ):
literal[string]
keyword[if] identifier[name] :
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[registry] :
identifier[msg] = literal[string]
keyword[raise] identifier[NotRegisteredError] ( identifier[msg] . identifier[format] ( identifier[name] ))
keyword[return] { identifier[name] : identifier[self] . identifier[registry] [ identifier[name] ]}
keyword[return] identifier[self] . identifier[registry] | def get_configs(self, name=None):
"""
Returns registred configurations.
* If ``name`` argument is not given, default behavior is to return
every config from all registred config;
* If ``name`` argument is given, just return its config and nothing
else;
Keyword Arguments:
name (string): Specific configuration name to return.
Raises:
NotRegisteredError: If given config name does not exist in
registry.
Returns:
dict: Configurations.
"""
if name:
if name not in self.registry:
msg = "Given config name '{}' is not registered."
raise NotRegisteredError(msg.format(name)) # depends on [control=['if'], data=['name']]
return {name: self.registry[name]} # depends on [control=['if'], data=[]]
return self.registry |
def page_down(self, n=1, interval=0, pre_dl=None, post_dl=None):
"""Pres page_down key n times.
**中文文档**
按 page_down 键n次。
"""
self.delay(pre_dl)
self.k.tap_key(self.k.page_down, n, interval)
self.delay(post_dl) | def function[page_down, parameter[self, n, interval, pre_dl, post_dl]]:
constant[Pres page_down key n times.
**中文文档**
按 page_down 键n次。
]
call[name[self].delay, parameter[name[pre_dl]]]
call[name[self].k.tap_key, parameter[name[self].k.page_down, name[n], name[interval]]]
call[name[self].delay, parameter[name[post_dl]]] | keyword[def] identifier[page_down] ( identifier[self] , identifier[n] = literal[int] , identifier[interval] = literal[int] , identifier[pre_dl] = keyword[None] , identifier[post_dl] = keyword[None] ):
literal[string]
identifier[self] . identifier[delay] ( identifier[pre_dl] )
identifier[self] . identifier[k] . identifier[tap_key] ( identifier[self] . identifier[k] . identifier[page_down] , identifier[n] , identifier[interval] )
identifier[self] . identifier[delay] ( identifier[post_dl] ) | def page_down(self, n=1, interval=0, pre_dl=None, post_dl=None):
"""Pres page_down key n times.
**中文文档**
按 page_down 键n次。
"""
self.delay(pre_dl)
self.k.tap_key(self.k.page_down, n, interval)
self.delay(post_dl) |
def observation(self, frame):
"""Add single zero row/column to observation if needed."""
if frame.shape == self.observation_space.shape:
return frame
else:
extended_frame = np.zeros(self.observation_space.shape,
self.observation_space.dtype)
assert self.HW_AXES == (0, 1)
extended_frame[:frame.shape[0], :frame.shape[1]] = frame
return extended_frame | def function[observation, parameter[self, frame]]:
constant[Add single zero row/column to observation if needed.]
if compare[name[frame].shape equal[==] name[self].observation_space.shape] begin[:]
return[name[frame]] | keyword[def] identifier[observation] ( identifier[self] , identifier[frame] ):
literal[string]
keyword[if] identifier[frame] . identifier[shape] == identifier[self] . identifier[observation_space] . identifier[shape] :
keyword[return] identifier[frame]
keyword[else] :
identifier[extended_frame] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[observation_space] . identifier[shape] ,
identifier[self] . identifier[observation_space] . identifier[dtype] )
keyword[assert] identifier[self] . identifier[HW_AXES] ==( literal[int] , literal[int] )
identifier[extended_frame] [: identifier[frame] . identifier[shape] [ literal[int] ],: identifier[frame] . identifier[shape] [ literal[int] ]]= identifier[frame]
keyword[return] identifier[extended_frame] | def observation(self, frame):
"""Add single zero row/column to observation if needed."""
if frame.shape == self.observation_space.shape:
return frame # depends on [control=['if'], data=[]]
else:
extended_frame = np.zeros(self.observation_space.shape, self.observation_space.dtype)
assert self.HW_AXES == (0, 1)
extended_frame[:frame.shape[0], :frame.shape[1]] = frame
return extended_frame |
def setup(self, fname):
"""Setup Run Configuration dialog with filename *fname*"""
self.filename = fname
self.runconfigoptions = RunConfigOptions(self)
self.runconfigoptions.set(RunConfiguration(fname).get())
self.add_widgets(self.runconfigoptions)
self.add_button_box(QDialogButtonBox.Cancel)
self.setWindowTitle(_("Run settings for %s") % osp.basename(fname)) | def function[setup, parameter[self, fname]]:
constant[Setup Run Configuration dialog with filename *fname*]
name[self].filename assign[=] name[fname]
name[self].runconfigoptions assign[=] call[name[RunConfigOptions], parameter[name[self]]]
call[name[self].runconfigoptions.set, parameter[call[call[name[RunConfiguration], parameter[name[fname]]].get, parameter[]]]]
call[name[self].add_widgets, parameter[name[self].runconfigoptions]]
call[name[self].add_button_box, parameter[name[QDialogButtonBox].Cancel]]
call[name[self].setWindowTitle, parameter[binary_operation[call[name[_], parameter[constant[Run settings for %s]]] <ast.Mod object at 0x7da2590d6920> call[name[osp].basename, parameter[name[fname]]]]]] | keyword[def] identifier[setup] ( identifier[self] , identifier[fname] ):
literal[string]
identifier[self] . identifier[filename] = identifier[fname]
identifier[self] . identifier[runconfigoptions] = identifier[RunConfigOptions] ( identifier[self] )
identifier[self] . identifier[runconfigoptions] . identifier[set] ( identifier[RunConfiguration] ( identifier[fname] ). identifier[get] ())
identifier[self] . identifier[add_widgets] ( identifier[self] . identifier[runconfigoptions] )
identifier[self] . identifier[add_button_box] ( identifier[QDialogButtonBox] . identifier[Cancel] )
identifier[self] . identifier[setWindowTitle] ( identifier[_] ( literal[string] )% identifier[osp] . identifier[basename] ( identifier[fname] )) | def setup(self, fname):
"""Setup Run Configuration dialog with filename *fname*"""
self.filename = fname
self.runconfigoptions = RunConfigOptions(self)
self.runconfigoptions.set(RunConfiguration(fname).get())
self.add_widgets(self.runconfigoptions)
self.add_button_box(QDialogButtonBox.Cancel)
self.setWindowTitle(_('Run settings for %s') % osp.basename(fname)) |
def get_scrapy_options(self):
"""
:return: all options listed in the config section 'Scrapy'
"""
if self.__scrapy_options is None:
self.__scrapy_options = {}
options = self.section("Scrapy")
for key, value in options.items():
self.__scrapy_options[key.upper()] = value
return self.__scrapy_options | def function[get_scrapy_options, parameter[self]]:
constant[
:return: all options listed in the config section 'Scrapy'
]
if compare[name[self].__scrapy_options is constant[None]] begin[:]
name[self].__scrapy_options assign[=] dictionary[[], []]
variable[options] assign[=] call[name[self].section, parameter[constant[Scrapy]]]
for taget[tuple[[<ast.Name object at 0x7da18dc9b4f0>, <ast.Name object at 0x7da18dc9bf40>]]] in starred[call[name[options].items, parameter[]]] begin[:]
call[name[self].__scrapy_options][call[name[key].upper, parameter[]]] assign[=] name[value]
return[name[self].__scrapy_options] | keyword[def] identifier[get_scrapy_options] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[__scrapy_options] keyword[is] keyword[None] :
identifier[self] . identifier[__scrapy_options] ={}
identifier[options] = identifier[self] . identifier[section] ( literal[string] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[options] . identifier[items] ():
identifier[self] . identifier[__scrapy_options] [ identifier[key] . identifier[upper] ()]= identifier[value]
keyword[return] identifier[self] . identifier[__scrapy_options] | def get_scrapy_options(self):
"""
:return: all options listed in the config section 'Scrapy'
"""
if self.__scrapy_options is None:
self.__scrapy_options = {}
options = self.section('Scrapy')
for (key, value) in options.items():
self.__scrapy_options[key.upper()] = value # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return self.__scrapy_options |
def _GetDataStreams(self):
"""Retrieves the data streams.
Returns:
list[NTFSDataStream]: data streams.
"""
if self._data_streams is None:
self._data_streams = []
if self._fsntfs_file_entry.has_default_data_stream():
data_stream = NTFSDataStream(None)
self._data_streams.append(data_stream)
for fsntfs_data_stream in self._fsntfs_file_entry.alternate_data_streams:
data_stream = NTFSDataStream(fsntfs_data_stream)
self._data_streams.append(data_stream)
return self._data_streams | def function[_GetDataStreams, parameter[self]]:
constant[Retrieves the data streams.
Returns:
list[NTFSDataStream]: data streams.
]
if compare[name[self]._data_streams is constant[None]] begin[:]
name[self]._data_streams assign[=] list[[]]
if call[name[self]._fsntfs_file_entry.has_default_data_stream, parameter[]] begin[:]
variable[data_stream] assign[=] call[name[NTFSDataStream], parameter[constant[None]]]
call[name[self]._data_streams.append, parameter[name[data_stream]]]
for taget[name[fsntfs_data_stream]] in starred[name[self]._fsntfs_file_entry.alternate_data_streams] begin[:]
variable[data_stream] assign[=] call[name[NTFSDataStream], parameter[name[fsntfs_data_stream]]]
call[name[self]._data_streams.append, parameter[name[data_stream]]]
return[name[self]._data_streams] | keyword[def] identifier[_GetDataStreams] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_data_streams] keyword[is] keyword[None] :
identifier[self] . identifier[_data_streams] =[]
keyword[if] identifier[self] . identifier[_fsntfs_file_entry] . identifier[has_default_data_stream] ():
identifier[data_stream] = identifier[NTFSDataStream] ( keyword[None] )
identifier[self] . identifier[_data_streams] . identifier[append] ( identifier[data_stream] )
keyword[for] identifier[fsntfs_data_stream] keyword[in] identifier[self] . identifier[_fsntfs_file_entry] . identifier[alternate_data_streams] :
identifier[data_stream] = identifier[NTFSDataStream] ( identifier[fsntfs_data_stream] )
identifier[self] . identifier[_data_streams] . identifier[append] ( identifier[data_stream] )
keyword[return] identifier[self] . identifier[_data_streams] | def _GetDataStreams(self):
"""Retrieves the data streams.
Returns:
list[NTFSDataStream]: data streams.
"""
if self._data_streams is None:
self._data_streams = []
if self._fsntfs_file_entry.has_default_data_stream():
data_stream = NTFSDataStream(None)
self._data_streams.append(data_stream) # depends on [control=['if'], data=[]]
for fsntfs_data_stream in self._fsntfs_file_entry.alternate_data_streams:
data_stream = NTFSDataStream(fsntfs_data_stream)
self._data_streams.append(data_stream) # depends on [control=['for'], data=['fsntfs_data_stream']] # depends on [control=['if'], data=[]]
return self._data_streams |
def get_subject(self, text):
"""
Email template subject is the first
line of the email template, we can optionally
add SUBJECT: to make it clearer
"""
first_line = text.splitlines(True)[0]
# TODO second line should be empty
if first_line.startswith('SUBJECT:'):
subject = first_line[len('SUBJECT:'):]
else:
subject = first_line
return subject.strip() | def function[get_subject, parameter[self, text]]:
constant[
Email template subject is the first
line of the email template, we can optionally
add SUBJECT: to make it clearer
]
variable[first_line] assign[=] call[call[name[text].splitlines, parameter[constant[True]]]][constant[0]]
if call[name[first_line].startswith, parameter[constant[SUBJECT:]]] begin[:]
variable[subject] assign[=] call[name[first_line]][<ast.Slice object at 0x7da18dc98910>]
return[call[name[subject].strip, parameter[]]] | keyword[def] identifier[get_subject] ( identifier[self] , identifier[text] ):
literal[string]
identifier[first_line] = identifier[text] . identifier[splitlines] ( keyword[True] )[ literal[int] ]
keyword[if] identifier[first_line] . identifier[startswith] ( literal[string] ):
identifier[subject] = identifier[first_line] [ identifier[len] ( literal[string] ):]
keyword[else] :
identifier[subject] = identifier[first_line]
keyword[return] identifier[subject] . identifier[strip] () | def get_subject(self, text):
"""
Email template subject is the first
line of the email template, we can optionally
add SUBJECT: to make it clearer
"""
first_line = text.splitlines(True)[0]
# TODO second line should be empty
if first_line.startswith('SUBJECT:'):
subject = first_line[len('SUBJECT:'):] # depends on [control=['if'], data=[]]
else:
subject = first_line
return subject.strip() |
def get_submit_args(args):
"""Gets arguments for the `submit_and_verify` method."""
submit_args = dict(
testrun_id=args.testrun_id,
user=args.user,
password=args.password,
no_verify=args.no_verify,
verify_timeout=args.verify_timeout,
log_file=args.job_log,
dry_run=args.dry_run,
)
submit_args = {k: v for k, v in submit_args.items() if v is not None}
return Box(submit_args, frozen_box=True, default_box=True) | def function[get_submit_args, parameter[args]]:
constant[Gets arguments for the `submit_and_verify` method.]
variable[submit_args] assign[=] call[name[dict], parameter[]]
variable[submit_args] assign[=] <ast.DictComp object at 0x7da1b229be50>
return[call[name[Box], parameter[name[submit_args]]]] | keyword[def] identifier[get_submit_args] ( identifier[args] ):
literal[string]
identifier[submit_args] = identifier[dict] (
identifier[testrun_id] = identifier[args] . identifier[testrun_id] ,
identifier[user] = identifier[args] . identifier[user] ,
identifier[password] = identifier[args] . identifier[password] ,
identifier[no_verify] = identifier[args] . identifier[no_verify] ,
identifier[verify_timeout] = identifier[args] . identifier[verify_timeout] ,
identifier[log_file] = identifier[args] . identifier[job_log] ,
identifier[dry_run] = identifier[args] . identifier[dry_run] ,
)
identifier[submit_args] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[submit_args] . identifier[items] () keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] }
keyword[return] identifier[Box] ( identifier[submit_args] , identifier[frozen_box] = keyword[True] , identifier[default_box] = keyword[True] ) | def get_submit_args(args):
"""Gets arguments for the `submit_and_verify` method."""
submit_args = dict(testrun_id=args.testrun_id, user=args.user, password=args.password, no_verify=args.no_verify, verify_timeout=args.verify_timeout, log_file=args.job_log, dry_run=args.dry_run)
submit_args = {k: v for (k, v) in submit_args.items() if v is not None}
return Box(submit_args, frozen_box=True, default_box=True) |
def add_many_to_dict_val_set(dict_obj, key, val_list):
"""Adds the given value list to the set mapped by the given key.
If the key is missing from the dict, the given mapping is added.
Example
-------
>>> dict_obj = {'a': set([1, 2])}
>>> add_many_to_dict_val_set(dict_obj, 'a', [2, 3])
>>> print(dict_obj['a'])
{1, 2, 3}
>>> add_many_to_dict_val_set(dict_obj, 'b', [2, 3])
>>> print(dict_obj['b'])
{2, 3}
"""
try:
dict_obj[key].update(val_list)
except KeyError:
dict_obj[key] = set(val_list) | def function[add_many_to_dict_val_set, parameter[dict_obj, key, val_list]]:
constant[Adds the given value list to the set mapped by the given key.
If the key is missing from the dict, the given mapping is added.
Example
-------
>>> dict_obj = {'a': set([1, 2])}
>>> add_many_to_dict_val_set(dict_obj, 'a', [2, 3])
>>> print(dict_obj['a'])
{1, 2, 3}
>>> add_many_to_dict_val_set(dict_obj, 'b', [2, 3])
>>> print(dict_obj['b'])
{2, 3}
]
<ast.Try object at 0x7da20c990b50> | keyword[def] identifier[add_many_to_dict_val_set] ( identifier[dict_obj] , identifier[key] , identifier[val_list] ):
literal[string]
keyword[try] :
identifier[dict_obj] [ identifier[key] ]. identifier[update] ( identifier[val_list] )
keyword[except] identifier[KeyError] :
identifier[dict_obj] [ identifier[key] ]= identifier[set] ( identifier[val_list] ) | def add_many_to_dict_val_set(dict_obj, key, val_list):
"""Adds the given value list to the set mapped by the given key.
If the key is missing from the dict, the given mapping is added.
Example
-------
>>> dict_obj = {'a': set([1, 2])}
>>> add_many_to_dict_val_set(dict_obj, 'a', [2, 3])
>>> print(dict_obj['a'])
{1, 2, 3}
>>> add_many_to_dict_val_set(dict_obj, 'b', [2, 3])
>>> print(dict_obj['b'])
{2, 3}
"""
try:
dict_obj[key].update(val_list) # depends on [control=['try'], data=[]]
except KeyError:
dict_obj[key] = set(val_list) # depends on [control=['except'], data=[]] |
def dns_resource_reference(self):
"""Instance depends on the API version:
* 2018-05-01: :class:`DnsResourceReferenceOperations<azure.mgmt.dns.v2018_05_01.operations.DnsResourceReferenceOperations>`
"""
api_version = self._get_api_version('dns_resource_reference')
if api_version == '2018-05-01':
from .v2018_05_01.operations import DnsResourceReferenceOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | def function[dns_resource_reference, parameter[self]]:
constant[Instance depends on the API version:
* 2018-05-01: :class:`DnsResourceReferenceOperations<azure.mgmt.dns.v2018_05_01.operations.DnsResourceReferenceOperations>`
]
variable[api_version] assign[=] call[name[self]._get_api_version, parameter[constant[dns_resource_reference]]]
if compare[name[api_version] equal[==] constant[2018-05-01]] begin[:]
from relative_module[v2018_05_01.operations] import module[DnsResourceReferenceOperations]
return[call[name[OperationClass], parameter[name[self]._client, name[self].config, call[name[Serializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]], call[name[Deserializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]]]]] | keyword[def] identifier[dns_resource_reference] ( identifier[self] ):
literal[string]
identifier[api_version] = identifier[self] . identifier[_get_api_version] ( literal[string] )
keyword[if] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2018_05_01] . identifier[operations] keyword[import] identifier[DnsResourceReferenceOperations] keyword[as] identifier[OperationClass]
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[api_version] ))
keyword[return] identifier[OperationClass] ( identifier[self] . identifier[_client] , identifier[self] . identifier[config] , identifier[Serializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )), identifier[Deserializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] ))) | def dns_resource_reference(self):
"""Instance depends on the API version:
* 2018-05-01: :class:`DnsResourceReferenceOperations<azure.mgmt.dns.v2018_05_01.operations.DnsResourceReferenceOperations>`
"""
api_version = self._get_api_version('dns_resource_reference')
if api_version == '2018-05-01':
from .v2018_05_01.operations import DnsResourceReferenceOperations as OperationClass # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('APIVersion {} is not available'.format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) |
def detect_log_config(arguments):
"""
Detect access log config (path and format) of nginx. Offer user to select if multiple access logs are detected.
:return: path and format of detected / selected access log
"""
config = arguments['--config']
if config is None:
config = detect_config_path()
if not os.path.exists(config):
error_exit('Nginx config file not found: %s' % config)
with open(config) as f:
config_str = f.read()
access_logs = dict(get_access_logs(config_str))
if not access_logs:
error_exit('Access log file is not provided and ngxtop cannot detect it from your config file (%s).' % config)
log_formats = dict(get_log_formats(config_str))
if len(access_logs) == 1:
log_path, format_name = list(access_logs.items())[0]
if format_name == 'combined':
return log_path, LOG_FORMAT_COMBINED
if format_name not in log_formats:
error_exit('Incorrect format name set in config for access log file "%s"' % log_path)
return log_path, log_formats[format_name]
# multiple access logs configured, offer to select one
print('Multiple access logs detected in configuration:')
log_path = choose_one(list(access_logs.keys()), 'Select access log file to process: ')
format_name = access_logs[log_path]
if format_name not in log_formats:
error_exit('Incorrect format name set in config for access log file "%s"' % log_path)
return log_path, log_formats[format_name] | def function[detect_log_config, parameter[arguments]]:
constant[
Detect access log config (path and format) of nginx. Offer user to select if multiple access logs are detected.
:return: path and format of detected / selected access log
]
variable[config] assign[=] call[name[arguments]][constant[--config]]
if compare[name[config] is constant[None]] begin[:]
variable[config] assign[=] call[name[detect_config_path], parameter[]]
if <ast.UnaryOp object at 0x7da1b1881150> begin[:]
call[name[error_exit], parameter[binary_operation[constant[Nginx config file not found: %s] <ast.Mod object at 0x7da2590d6920> name[config]]]]
with call[name[open], parameter[name[config]]] begin[:]
variable[config_str] assign[=] call[name[f].read, parameter[]]
variable[access_logs] assign[=] call[name[dict], parameter[call[name[get_access_logs], parameter[name[config_str]]]]]
if <ast.UnaryOp object at 0x7da1b1881d80> begin[:]
call[name[error_exit], parameter[binary_operation[constant[Access log file is not provided and ngxtop cannot detect it from your config file (%s).] <ast.Mod object at 0x7da2590d6920> name[config]]]]
variable[log_formats] assign[=] call[name[dict], parameter[call[name[get_log_formats], parameter[name[config_str]]]]]
if compare[call[name[len], parameter[name[access_logs]]] equal[==] constant[1]] begin[:]
<ast.Tuple object at 0x7da1b1881360> assign[=] call[call[name[list], parameter[call[name[access_logs].items, parameter[]]]]][constant[0]]
if compare[name[format_name] equal[==] constant[combined]] begin[:]
return[tuple[[<ast.Name object at 0x7da18ede6e60>, <ast.Name object at 0x7da18ede7940>]]]
if compare[name[format_name] <ast.NotIn object at 0x7da2590d7190> name[log_formats]] begin[:]
call[name[error_exit], parameter[binary_operation[constant[Incorrect format name set in config for access log file "%s"] <ast.Mod object at 0x7da2590d6920> name[log_path]]]]
return[tuple[[<ast.Name object at 0x7da18ede49a0>, <ast.Subscript object at 0x7da18ede5ea0>]]]
call[name[print], parameter[constant[Multiple access logs detected in configuration:]]]
variable[log_path] assign[=] call[name[choose_one], parameter[call[name[list], parameter[call[name[access_logs].keys, parameter[]]]], constant[Select access log file to process: ]]]
variable[format_name] assign[=] call[name[access_logs]][name[log_path]]
if compare[name[format_name] <ast.NotIn object at 0x7da2590d7190> name[log_formats]] begin[:]
call[name[error_exit], parameter[binary_operation[constant[Incorrect format name set in config for access log file "%s"] <ast.Mod object at 0x7da2590d6920> name[log_path]]]]
return[tuple[[<ast.Name object at 0x7da1b184ceb0>, <ast.Subscript object at 0x7da1b184d210>]]] | keyword[def] identifier[detect_log_config] ( identifier[arguments] ):
literal[string]
identifier[config] = identifier[arguments] [ literal[string] ]
keyword[if] identifier[config] keyword[is] keyword[None] :
identifier[config] = identifier[detect_config_path] ()
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[config] ):
identifier[error_exit] ( literal[string] % identifier[config] )
keyword[with] identifier[open] ( identifier[config] ) keyword[as] identifier[f] :
identifier[config_str] = identifier[f] . identifier[read] ()
identifier[access_logs] = identifier[dict] ( identifier[get_access_logs] ( identifier[config_str] ))
keyword[if] keyword[not] identifier[access_logs] :
identifier[error_exit] ( literal[string] % identifier[config] )
identifier[log_formats] = identifier[dict] ( identifier[get_log_formats] ( identifier[config_str] ))
keyword[if] identifier[len] ( identifier[access_logs] )== literal[int] :
identifier[log_path] , identifier[format_name] = identifier[list] ( identifier[access_logs] . identifier[items] ())[ literal[int] ]
keyword[if] identifier[format_name] == literal[string] :
keyword[return] identifier[log_path] , identifier[LOG_FORMAT_COMBINED]
keyword[if] identifier[format_name] keyword[not] keyword[in] identifier[log_formats] :
identifier[error_exit] ( literal[string] % identifier[log_path] )
keyword[return] identifier[log_path] , identifier[log_formats] [ identifier[format_name] ]
identifier[print] ( literal[string] )
identifier[log_path] = identifier[choose_one] ( identifier[list] ( identifier[access_logs] . identifier[keys] ()), literal[string] )
identifier[format_name] = identifier[access_logs] [ identifier[log_path] ]
keyword[if] identifier[format_name] keyword[not] keyword[in] identifier[log_formats] :
identifier[error_exit] ( literal[string] % identifier[log_path] )
keyword[return] identifier[log_path] , identifier[log_formats] [ identifier[format_name] ] | def detect_log_config(arguments):
"""
Detect access log config (path and format) of nginx. Offer user to select if multiple access logs are detected.
:return: path and format of detected / selected access log
"""
config = arguments['--config']
if config is None:
config = detect_config_path() # depends on [control=['if'], data=['config']]
if not os.path.exists(config):
error_exit('Nginx config file not found: %s' % config) # depends on [control=['if'], data=[]]
with open(config) as f:
config_str = f.read() # depends on [control=['with'], data=['f']]
access_logs = dict(get_access_logs(config_str))
if not access_logs:
error_exit('Access log file is not provided and ngxtop cannot detect it from your config file (%s).' % config) # depends on [control=['if'], data=[]]
log_formats = dict(get_log_formats(config_str))
if len(access_logs) == 1:
(log_path, format_name) = list(access_logs.items())[0]
if format_name == 'combined':
return (log_path, LOG_FORMAT_COMBINED) # depends on [control=['if'], data=[]]
if format_name not in log_formats:
error_exit('Incorrect format name set in config for access log file "%s"' % log_path) # depends on [control=['if'], data=[]]
return (log_path, log_formats[format_name]) # depends on [control=['if'], data=[]]
# multiple access logs configured, offer to select one
print('Multiple access logs detected in configuration:')
log_path = choose_one(list(access_logs.keys()), 'Select access log file to process: ')
format_name = access_logs[log_path]
if format_name not in log_formats:
error_exit('Incorrect format name set in config for access log file "%s"' % log_path) # depends on [control=['if'], data=[]]
return (log_path, log_formats[format_name]) |
def command(self, payload):
"""
Send a command to i3. See the `list of commands
<http://i3wm.org/docs/userguide.html#_list_of_commands>`_ in the user
guide for available commands. Pass the text of the command to execute
as the first arguments. This is essentially the same as using
``i3-msg`` or an ``exec`` block in your i3 config to control the
window manager.
:rtype: List of :class:`CommandReply` or None if the command causes i3
to restart or exit and does not give a reply.
"""
data = self.message(MessageType.COMMAND, payload)
if data:
return json.loads(data, object_hook=CommandReply)
else:
return None | def function[command, parameter[self, payload]]:
constant[
Send a command to i3. See the `list of commands
<http://i3wm.org/docs/userguide.html#_list_of_commands>`_ in the user
guide for available commands. Pass the text of the command to execute
as the first arguments. This is essentially the same as using
``i3-msg`` or an ``exec`` block in your i3 config to control the
window manager.
:rtype: List of :class:`CommandReply` or None if the command causes i3
to restart or exit and does not give a reply.
]
variable[data] assign[=] call[name[self].message, parameter[name[MessageType].COMMAND, name[payload]]]
if name[data] begin[:]
return[call[name[json].loads, parameter[name[data]]]] | keyword[def] identifier[command] ( identifier[self] , identifier[payload] ):
literal[string]
identifier[data] = identifier[self] . identifier[message] ( identifier[MessageType] . identifier[COMMAND] , identifier[payload] )
keyword[if] identifier[data] :
keyword[return] identifier[json] . identifier[loads] ( identifier[data] , identifier[object_hook] = identifier[CommandReply] )
keyword[else] :
keyword[return] keyword[None] | def command(self, payload):
"""
Send a command to i3. See the `list of commands
<http://i3wm.org/docs/userguide.html#_list_of_commands>`_ in the user
guide for available commands. Pass the text of the command to execute
as the first arguments. This is essentially the same as using
``i3-msg`` or an ``exec`` block in your i3 config to control the
window manager.
:rtype: List of :class:`CommandReply` or None if the command causes i3
to restart or exit and does not give a reply.
"""
data = self.message(MessageType.COMMAND, payload)
if data:
return json.loads(data, object_hook=CommandReply) # depends on [control=['if'], data=[]]
else:
return None |
def sorted_items(d, key=__identity, reverse=False):
"""
Return the items of the dictionary sorted by the keys
>>> sample = dict(foo=20, bar=42, baz=10)
>>> tuple(sorted_items(sample))
(('bar', 42), ('baz', 10), ('foo', 20))
>>> reverse_string = lambda s: ''.join(reversed(s))
>>> tuple(sorted_items(sample, key=reverse_string))
(('foo', 20), ('bar', 42), ('baz', 10))
>>> tuple(sorted_items(sample, reverse=True))
(('foo', 20), ('baz', 10), ('bar', 42))
"""
# wrap the key func so it operates on the first element of each item
def pairkey_key(item):
return key(item[0])
return sorted(d.items(), key=pairkey_key, reverse=reverse) | def function[sorted_items, parameter[d, key, reverse]]:
constant[
Return the items of the dictionary sorted by the keys
>>> sample = dict(foo=20, bar=42, baz=10)
>>> tuple(sorted_items(sample))
(('bar', 42), ('baz', 10), ('foo', 20))
>>> reverse_string = lambda s: ''.join(reversed(s))
>>> tuple(sorted_items(sample, key=reverse_string))
(('foo', 20), ('bar', 42), ('baz', 10))
>>> tuple(sorted_items(sample, reverse=True))
(('foo', 20), ('baz', 10), ('bar', 42))
]
def function[pairkey_key, parameter[item]]:
return[call[name[key], parameter[call[name[item]][constant[0]]]]]
return[call[name[sorted], parameter[call[name[d].items, parameter[]]]]] | keyword[def] identifier[sorted_items] ( identifier[d] , identifier[key] = identifier[__identity] , identifier[reverse] = keyword[False] ):
literal[string]
keyword[def] identifier[pairkey_key] ( identifier[item] ):
keyword[return] identifier[key] ( identifier[item] [ literal[int] ])
keyword[return] identifier[sorted] ( identifier[d] . identifier[items] (), identifier[key] = identifier[pairkey_key] , identifier[reverse] = identifier[reverse] ) | def sorted_items(d, key=__identity, reverse=False):
"""
Return the items of the dictionary sorted by the keys
>>> sample = dict(foo=20, bar=42, baz=10)
>>> tuple(sorted_items(sample))
(('bar', 42), ('baz', 10), ('foo', 20))
>>> reverse_string = lambda s: ''.join(reversed(s))
>>> tuple(sorted_items(sample, key=reverse_string))
(('foo', 20), ('bar', 42), ('baz', 10))
>>> tuple(sorted_items(sample, reverse=True))
(('foo', 20), ('baz', 10), ('bar', 42))
""" # wrap the key func so it operates on the first element of each item
def pairkey_key(item):
return key(item[0])
return sorted(d.items(), key=pairkey_key, reverse=reverse) |
def calc_rho_and_rho_bar_squared(final_log_likelihood,
null_log_likelihood,
num_est_parameters):
"""
Calculates McFadden's rho-squared and rho-bar squared for the given model.
Parameters
----------
final_log_likelihood : float.
The final log-likelihood of the model whose rho-squared and rho-bar
squared are being calculated for.
null_log_likelihood : float.
The log-likelihood of the model in question, when all parameters are
zero or their 'base' values.
num_est_parameters : int.
The number of parameters estimated in this model.
Returns
-------
`(rho_squared, rho_bar_squared)` : tuple of floats.
The rho-squared and rho-bar-squared for the model.
"""
rho_squared = 1.0 - final_log_likelihood / null_log_likelihood
rho_bar_squared = 1.0 - ((final_log_likelihood - num_est_parameters) /
null_log_likelihood)
return rho_squared, rho_bar_squared | def function[calc_rho_and_rho_bar_squared, parameter[final_log_likelihood, null_log_likelihood, num_est_parameters]]:
constant[
Calculates McFadden's rho-squared and rho-bar squared for the given model.
Parameters
----------
final_log_likelihood : float.
The final log-likelihood of the model whose rho-squared and rho-bar
squared are being calculated for.
null_log_likelihood : float.
The log-likelihood of the model in question, when all parameters are
zero or their 'base' values.
num_est_parameters : int.
The number of parameters estimated in this model.
Returns
-------
`(rho_squared, rho_bar_squared)` : tuple of floats.
The rho-squared and rho-bar-squared for the model.
]
variable[rho_squared] assign[=] binary_operation[constant[1.0] - binary_operation[name[final_log_likelihood] / name[null_log_likelihood]]]
variable[rho_bar_squared] assign[=] binary_operation[constant[1.0] - binary_operation[binary_operation[name[final_log_likelihood] - name[num_est_parameters]] / name[null_log_likelihood]]]
return[tuple[[<ast.Name object at 0x7da1b15ac7c0>, <ast.Name object at 0x7da1b15ac790>]]] | keyword[def] identifier[calc_rho_and_rho_bar_squared] ( identifier[final_log_likelihood] ,
identifier[null_log_likelihood] ,
identifier[num_est_parameters] ):
literal[string]
identifier[rho_squared] = literal[int] - identifier[final_log_likelihood] / identifier[null_log_likelihood]
identifier[rho_bar_squared] = literal[int] -(( identifier[final_log_likelihood] - identifier[num_est_parameters] )/
identifier[null_log_likelihood] )
keyword[return] identifier[rho_squared] , identifier[rho_bar_squared] | def calc_rho_and_rho_bar_squared(final_log_likelihood, null_log_likelihood, num_est_parameters):
"""
Calculates McFadden's rho-squared and rho-bar squared for the given model.
Parameters
----------
final_log_likelihood : float.
The final log-likelihood of the model whose rho-squared and rho-bar
squared are being calculated for.
null_log_likelihood : float.
The log-likelihood of the model in question, when all parameters are
zero or their 'base' values.
num_est_parameters : int.
The number of parameters estimated in this model.
Returns
-------
`(rho_squared, rho_bar_squared)` : tuple of floats.
The rho-squared and rho-bar-squared for the model.
"""
rho_squared = 1.0 - final_log_likelihood / null_log_likelihood
rho_bar_squared = 1.0 - (final_log_likelihood - num_est_parameters) / null_log_likelihood
return (rho_squared, rho_bar_squared) |
def connect(self, address, token=None):
"""
Connect the underlying websocket to the address,
send a handshake and optionally a token packet.
Returns `True` if connected, `False` if the connection failed.
:param address: string, `IP:PORT`
:param token: unique token, required by official servers,
acquired through utils.find_server()
:return: True if connected, False if not
"""
if self.connected:
self.subscriber.on_connect_error(
'Already connected to "%s"' % self.address)
return False
self.address = address
self.server_token = token
self.ingame = False
self.ws.settimeout(1)
self.ws.connect('ws://%s' % self.address, origin='http://agar.io')
if not self.connected:
self.subscriber.on_connect_error(
'Failed to connect to "%s"' % self.address)
return False
self.subscriber.on_sock_open()
# allow handshake canceling
if not self.connected:
self.subscriber.on_connect_error(
'Disconnected before sending handshake')
return False
self.send_handshake()
if self.server_token:
self.send_token(self.server_token)
old_nick = self.player.nick
self.player.reset()
self.world.reset()
self.player.nick = old_nick
return True | def function[connect, parameter[self, address, token]]:
constant[
Connect the underlying websocket to the address,
send a handshake and optionally a token packet.
Returns `True` if connected, `False` if the connection failed.
:param address: string, `IP:PORT`
:param token: unique token, required by official servers,
acquired through utils.find_server()
:return: True if connected, False if not
]
if name[self].connected begin[:]
call[name[self].subscriber.on_connect_error, parameter[binary_operation[constant[Already connected to "%s"] <ast.Mod object at 0x7da2590d6920> name[self].address]]]
return[constant[False]]
name[self].address assign[=] name[address]
name[self].server_token assign[=] name[token]
name[self].ingame assign[=] constant[False]
call[name[self].ws.settimeout, parameter[constant[1]]]
call[name[self].ws.connect, parameter[binary_operation[constant[ws://%s] <ast.Mod object at 0x7da2590d6920> name[self].address]]]
if <ast.UnaryOp object at 0x7da20c7c8bb0> begin[:]
call[name[self].subscriber.on_connect_error, parameter[binary_operation[constant[Failed to connect to "%s"] <ast.Mod object at 0x7da2590d6920> name[self].address]]]
return[constant[False]]
call[name[self].subscriber.on_sock_open, parameter[]]
if <ast.UnaryOp object at 0x7da18f720820> begin[:]
call[name[self].subscriber.on_connect_error, parameter[constant[Disconnected before sending handshake]]]
return[constant[False]]
call[name[self].send_handshake, parameter[]]
if name[self].server_token begin[:]
call[name[self].send_token, parameter[name[self].server_token]]
variable[old_nick] assign[=] name[self].player.nick
call[name[self].player.reset, parameter[]]
call[name[self].world.reset, parameter[]]
name[self].player.nick assign[=] name[old_nick]
return[constant[True]] | keyword[def] identifier[connect] ( identifier[self] , identifier[address] , identifier[token] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[connected] :
identifier[self] . identifier[subscriber] . identifier[on_connect_error] (
literal[string] % identifier[self] . identifier[address] )
keyword[return] keyword[False]
identifier[self] . identifier[address] = identifier[address]
identifier[self] . identifier[server_token] = identifier[token]
identifier[self] . identifier[ingame] = keyword[False]
identifier[self] . identifier[ws] . identifier[settimeout] ( literal[int] )
identifier[self] . identifier[ws] . identifier[connect] ( literal[string] % identifier[self] . identifier[address] , identifier[origin] = literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[connected] :
identifier[self] . identifier[subscriber] . identifier[on_connect_error] (
literal[string] % identifier[self] . identifier[address] )
keyword[return] keyword[False]
identifier[self] . identifier[subscriber] . identifier[on_sock_open] ()
keyword[if] keyword[not] identifier[self] . identifier[connected] :
identifier[self] . identifier[subscriber] . identifier[on_connect_error] (
literal[string] )
keyword[return] keyword[False]
identifier[self] . identifier[send_handshake] ()
keyword[if] identifier[self] . identifier[server_token] :
identifier[self] . identifier[send_token] ( identifier[self] . identifier[server_token] )
identifier[old_nick] = identifier[self] . identifier[player] . identifier[nick]
identifier[self] . identifier[player] . identifier[reset] ()
identifier[self] . identifier[world] . identifier[reset] ()
identifier[self] . identifier[player] . identifier[nick] = identifier[old_nick]
keyword[return] keyword[True] | def connect(self, address, token=None):
"""
Connect the underlying websocket to the address,
send a handshake and optionally a token packet.
Returns `True` if connected, `False` if the connection failed.
:param address: string, `IP:PORT`
:param token: unique token, required by official servers,
acquired through utils.find_server()
:return: True if connected, False if not
"""
if self.connected:
self.subscriber.on_connect_error('Already connected to "%s"' % self.address)
return False # depends on [control=['if'], data=[]]
self.address = address
self.server_token = token
self.ingame = False
self.ws.settimeout(1)
self.ws.connect('ws://%s' % self.address, origin='http://agar.io')
if not self.connected:
self.subscriber.on_connect_error('Failed to connect to "%s"' % self.address)
return False # depends on [control=['if'], data=[]]
self.subscriber.on_sock_open()
# allow handshake canceling
if not self.connected:
self.subscriber.on_connect_error('Disconnected before sending handshake')
return False # depends on [control=['if'], data=[]]
self.send_handshake()
if self.server_token:
self.send_token(self.server_token) # depends on [control=['if'], data=[]]
old_nick = self.player.nick
self.player.reset()
self.world.reset()
self.player.nick = old_nick
return True |
def get_parser(self):
"""
Returns :class:`monolith.cli.Parser` instance for this
*ExecutionManager*.
"""
parser = self.parser_cls(prog=self.prog_name, usage=self.get_usage(),
stream=self.stderr)
subparsers = parser.add_subparsers(
title='subcommands',
)
for name, command in self.registry.items():
cmdparser = subparsers.add_parser(name, help=command.help)
for argument in command.get_args():
cmdparser.add_argument(*argument.args, **argument.kwargs)
command.setup_parser(parser, cmdparser)
cmdparser.set_defaults(func=command.handle)
return parser | def function[get_parser, parameter[self]]:
constant[
Returns :class:`monolith.cli.Parser` instance for this
*ExecutionManager*.
]
variable[parser] assign[=] call[name[self].parser_cls, parameter[]]
variable[subparsers] assign[=] call[name[parser].add_subparsers, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18bcc9fc0>, <ast.Name object at 0x7da18bccb8e0>]]] in starred[call[name[self].registry.items, parameter[]]] begin[:]
variable[cmdparser] assign[=] call[name[subparsers].add_parser, parameter[name[name]]]
for taget[name[argument]] in starred[call[name[command].get_args, parameter[]]] begin[:]
call[name[cmdparser].add_argument, parameter[<ast.Starred object at 0x7da18bcc8c10>]]
call[name[command].setup_parser, parameter[name[parser], name[cmdparser]]]
call[name[cmdparser].set_defaults, parameter[]]
return[name[parser]] | keyword[def] identifier[get_parser] ( identifier[self] ):
literal[string]
identifier[parser] = identifier[self] . identifier[parser_cls] ( identifier[prog] = identifier[self] . identifier[prog_name] , identifier[usage] = identifier[self] . identifier[get_usage] (),
identifier[stream] = identifier[self] . identifier[stderr] )
identifier[subparsers] = identifier[parser] . identifier[add_subparsers] (
identifier[title] = literal[string] ,
)
keyword[for] identifier[name] , identifier[command] keyword[in] identifier[self] . identifier[registry] . identifier[items] ():
identifier[cmdparser] = identifier[subparsers] . identifier[add_parser] ( identifier[name] , identifier[help] = identifier[command] . identifier[help] )
keyword[for] identifier[argument] keyword[in] identifier[command] . identifier[get_args] ():
identifier[cmdparser] . identifier[add_argument] (* identifier[argument] . identifier[args] ,** identifier[argument] . identifier[kwargs] )
identifier[command] . identifier[setup_parser] ( identifier[parser] , identifier[cmdparser] )
identifier[cmdparser] . identifier[set_defaults] ( identifier[func] = identifier[command] . identifier[handle] )
keyword[return] identifier[parser] | def get_parser(self):
"""
Returns :class:`monolith.cli.Parser` instance for this
*ExecutionManager*.
"""
parser = self.parser_cls(prog=self.prog_name, usage=self.get_usage(), stream=self.stderr)
subparsers = parser.add_subparsers(title='subcommands')
for (name, command) in self.registry.items():
cmdparser = subparsers.add_parser(name, help=command.help)
for argument in command.get_args():
cmdparser.add_argument(*argument.args, **argument.kwargs) # depends on [control=['for'], data=['argument']]
command.setup_parser(parser, cmdparser)
cmdparser.set_defaults(func=command.handle) # depends on [control=['for'], data=[]]
return parser |
def __request(self, method, url, request_args, headers=None, stream=False):
    """Perform the actual HTTP request.

    Called by the ``request`` method for 'regular' API calls, or
    indirectly by the ``__stream_request`` method for 'streaming' calls.

    :param method: name of the HTTP method to look up on ``self.client``
        (e.g. 'get' or 'post').
    :param url: the URL to request.
    :param request_args: dict of extra keyword arguments forwarded to the
        client call.
    :param headers: optional dict of HTTP headers (defaults to empty).
    :param stream: whether to perform a streaming request.
    :raises requests.RequestException: re-raised after logging when the
        transport-level request fails.
    :raises V20Error: when the server responds with status >= 400.
    :returns: the successful response object.
    """
    func = getattr(self.client, method)
    headers = headers if headers else {}
    try:
        logger.info("performing request %s", url)
        response = func(url, stream=stream, headers=headers,
                        **request_args)
    except requests.RequestException as err:
        logger.error("request %s failed [%s]", url, err)
        # Bare `raise` re-raises the active exception with its original
        # traceback intact (the previous `raise err` re-raised from here).
        raise
    # Handle error responses: decode the body once and reuse it for both
    # the log message and the raised error (was decoded twice before).
    if response.status_code >= 400:
        content = response.content.decode('utf-8')
        logger.error("request %s failed [%d,%s]",
                     url, response.status_code, content)
        raise V20Error(response.status_code, content)
    return response
constant[__request.
make the actual request. This method is called by the
request method in case of 'regular' API-calls. Or indirectly by
the__stream_request method if it concerns a 'streaming' call.
]
variable[func] assign[=] call[name[getattr], parameter[name[self].client, name[method]]]
variable[headers] assign[=] <ast.IfExp object at 0x7da20e960df0>
variable[response] assign[=] constant[None]
<ast.Try object at 0x7da20e961750>
if compare[name[response].status_code greater_or_equal[>=] constant[400]] begin[:]
call[name[logger].error, parameter[constant[request %s failed [%d,%s]], name[url], name[response].status_code, call[name[response].content.decode, parameter[constant[utf-8]]]]]
<ast.Raise object at 0x7da20e9637f0>
return[name[response]] | keyword[def] identifier[__request] ( identifier[self] , identifier[method] , identifier[url] , identifier[request_args] , identifier[headers] = keyword[None] , identifier[stream] = keyword[False] ):
literal[string]
identifier[func] = identifier[getattr] ( identifier[self] . identifier[client] , identifier[method] )
identifier[headers] = identifier[headers] keyword[if] identifier[headers] keyword[else] {}
identifier[response] = keyword[None]
keyword[try] :
identifier[logger] . identifier[info] ( literal[string] , identifier[url] )
identifier[response] = identifier[func] ( identifier[url] , identifier[stream] = identifier[stream] , identifier[headers] = identifier[headers] ,
** identifier[request_args] )
keyword[except] identifier[requests] . identifier[RequestException] keyword[as] identifier[err] :
identifier[logger] . identifier[error] ( literal[string] , identifier[url] , identifier[err] )
keyword[raise] identifier[err]
keyword[if] identifier[response] . identifier[status_code] >= literal[int] :
identifier[logger] . identifier[error] ( literal[string] ,
identifier[url] ,
identifier[response] . identifier[status_code] ,
identifier[response] . identifier[content] . identifier[decode] ( literal[string] ))
keyword[raise] identifier[V20Error] ( identifier[response] . identifier[status_code] ,
identifier[response] . identifier[content] . identifier[decode] ( literal[string] ))
keyword[return] identifier[response] | def __request(self, method, url, request_args, headers=None, stream=False):
"""__request.
make the actual request. This method is called by the
request method in case of 'regular' API-calls. Or indirectly by
the__stream_request method if it concerns a 'streaming' call.
"""
func = getattr(self.client, method)
headers = headers if headers else {}
response = None
try:
logger.info('performing request %s', url)
response = func(url, stream=stream, headers=headers, **request_args) # depends on [control=['try'], data=[]]
except requests.RequestException as err:
logger.error('request %s failed [%s]', url, err)
raise err # depends on [control=['except'], data=['err']]
# Handle error responses
if response.status_code >= 400:
logger.error('request %s failed [%d,%s]', url, response.status_code, response.content.decode('utf-8'))
raise V20Error(response.status_code, response.content.decode('utf-8')) # depends on [control=['if'], data=[]]
return response |
def path_to_text(self, path):
    '''
    Transform local PDF file to string.
    Args:
        path: path to PDF file.
    Returns:
        string (extracted text with newlines removed).
    '''
    rsrcmgr = PDFResourceManager()
    retstr = StringIO()
    laparams = LAParams()
    device = TextConverter(rsrcmgr, retstr, codec='utf-8', laparams=laparams)
    try:
        # `with` guarantees the file handle is released even if pdfminer
        # raises while parsing (the original leaked fp/device/retstr on
        # any exception).
        with open(path, 'rb') as fp:
            interpreter = PDFPageInterpreter(rsrcmgr, device)
            pages_data = PDFPage.get_pages(
                fp,
                set(),          # pagenos: empty set -> process all pages
                maxpages=0,     # 0 -> no page limit
                password="",
                caching=True,
                check_extractable=True
            )
            for page in pages_data:
                interpreter.process_page(page)
        text = retstr.getvalue().replace("\n", "")
    finally:
        device.close()
        retstr.close()
    return text
constant[
Transform local PDF file to string.
Args:
path: path to PDF file.
Returns:
string.
]
variable[rsrcmgr] assign[=] call[name[PDFResourceManager], parameter[]]
variable[retstr] assign[=] call[name[StringIO], parameter[]]
variable[codec] assign[=] constant[utf-8]
variable[laparams] assign[=] call[name[LAParams], parameter[]]
variable[device] assign[=] call[name[TextConverter], parameter[name[rsrcmgr], name[retstr]]]
variable[fp] assign[=] call[name[open], parameter[name[path], constant[rb]]]
variable[interpreter] assign[=] call[name[PDFPageInterpreter], parameter[name[rsrcmgr], name[device]]]
variable[password] assign[=] constant[]
variable[maxpages] assign[=] constant[0]
variable[caching] assign[=] constant[True]
variable[pagenos] assign[=] call[name[set], parameter[]]
variable[pages_data] assign[=] call[name[PDFPage].get_pages, parameter[name[fp], name[pagenos]]]
for taget[name[page]] in starred[name[pages_data]] begin[:]
call[name[interpreter].process_page, parameter[name[page]]]
variable[text] assign[=] call[name[retstr].getvalue, parameter[]]
variable[text] assign[=] call[name[text].replace, parameter[constant[
], constant[]]]
call[name[fp].close, parameter[]]
call[name[device].close, parameter[]]
call[name[retstr].close, parameter[]]
return[name[text]] | keyword[def] identifier[path_to_text] ( identifier[self] , identifier[path] ):
literal[string]
identifier[rsrcmgr] = identifier[PDFResourceManager] ()
identifier[retstr] = identifier[StringIO] ()
identifier[codec] = literal[string]
identifier[laparams] = identifier[LAParams] ()
identifier[device] = identifier[TextConverter] ( identifier[rsrcmgr] , identifier[retstr] , identifier[codec] = identifier[codec] , identifier[laparams] = identifier[laparams] )
identifier[fp] = identifier[open] ( identifier[path] , literal[string] )
identifier[interpreter] = identifier[PDFPageInterpreter] ( identifier[rsrcmgr] , identifier[device] )
identifier[password] = literal[string]
identifier[maxpages] = literal[int]
identifier[caching] = keyword[True]
identifier[pagenos] = identifier[set] ()
identifier[pages_data] = identifier[PDFPage] . identifier[get_pages] (
identifier[fp] ,
identifier[pagenos] ,
identifier[maxpages] = identifier[maxpages] ,
identifier[password] = identifier[password] ,
identifier[caching] = identifier[caching] ,
identifier[check_extractable] = keyword[True]
)
keyword[for] identifier[page] keyword[in] identifier[pages_data] :
identifier[interpreter] . identifier[process_page] ( identifier[page] )
identifier[text] = identifier[retstr] . identifier[getvalue] ()
identifier[text] = identifier[text] . identifier[replace] ( literal[string] , literal[string] )
identifier[fp] . identifier[close] ()
identifier[device] . identifier[close] ()
identifier[retstr] . identifier[close] ()
keyword[return] identifier[text] | def path_to_text(self, path):
"""
Transform local PDF file to string.
Args:
path: path to PDF file.
Returns:
string.
"""
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = open(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ''
maxpages = 0
caching = True
pagenos = set()
pages_data = PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True)
for page in pages_data:
interpreter.process_page(page) # depends on [control=['for'], data=['page']]
text = retstr.getvalue()
text = text.replace('\n', '')
fp.close()
device.close()
retstr.close()
return text |
def set_interactive_policy(*, locals=None, banner=None, serve=None,
                           prompt_control=None):
    """Install an interactive event loop policy as the asyncio default.

    All keyword arguments are forwarded unchanged to the
    ``InteractiveEventLoopPolicy`` constructor.
    """
    asyncio.set_event_loop_policy(
        InteractiveEventLoopPolicy(
            locals=locals,
            banner=banner,
            serve=serve,
            prompt_control=prompt_control,
        )
    )
constant[Use an interactive event loop by default.]
variable[policy] assign[=] call[name[InteractiveEventLoopPolicy], parameter[]]
call[name[asyncio].set_event_loop_policy, parameter[name[policy]]] | keyword[def] identifier[set_interactive_policy] (*, identifier[locals] = keyword[None] , identifier[banner] = keyword[None] , identifier[serve] = keyword[None] ,
identifier[prompt_control] = keyword[None] ):
literal[string]
identifier[policy] = identifier[InteractiveEventLoopPolicy] (
identifier[locals] = identifier[locals] ,
identifier[banner] = identifier[banner] ,
identifier[serve] = identifier[serve] ,
identifier[prompt_control] = identifier[prompt_control] )
identifier[asyncio] . identifier[set_event_loop_policy] ( identifier[policy] ) | def set_interactive_policy(*, locals=None, banner=None, serve=None, prompt_control=None):
"""Use an interactive event loop by default."""
policy = InteractiveEventLoopPolicy(locals=locals, banner=banner, serve=serve, prompt_control=prompt_control)
asyncio.set_event_loop_policy(policy) |
def topics(self):
    """Read-only mapping of path -> topic, ordered by path."""
    ordered = OrderedDict()
    for path in sorted(self._topics):
        ordered[path] = self._topics[path]
    return MappingProxyType(ordered)
constant[ Ordered dictionary with path:topic ordered by path ]
variable[topics_sorted] assign[=] call[name[sorted], parameter[call[name[self]._topics.items, parameter[]]]]
return[call[name[MappingProxyType], parameter[call[name[OrderedDict], parameter[name[topics_sorted]]]]]] | keyword[def] identifier[topics] ( identifier[self] ):
literal[string]
identifier[topics_sorted] = identifier[sorted] ( identifier[self] . identifier[_topics] . identifier[items] (), identifier[key] = keyword[lambda] identifier[t] : identifier[t] [ literal[int] ])
keyword[return] identifier[MappingProxyType] ( identifier[OrderedDict] ( identifier[topics_sorted] )) | def topics(self):
""" Ordered dictionary with path:topic ordered by path """
topics_sorted = sorted(self._topics.items(), key=lambda t: t[0])
return MappingProxyType(OrderedDict(topics_sorted)) |
def append(self, data):
    """
    Append data to the end of the stream. The pointer will not move if
    this operation is successful.

    @param data: The data to append to the stream; objects exposing a
        C{getvalue} method (e.g. StringIO) have their contents appended.
    @type data: C{str} or C{unicode}
    @raise TypeError: data is not C{str} or C{unicode}
    """
    saved_pos = self.tell()
    # Jump to the end of the stream before writing.
    self.seek(0, 2)
    payload = data.getvalue() if hasattr(data, 'getvalue') else data
    self.write_utf8_string(payload)
    # Restore the caller's position.
    self.seek(saved_pos)
constant[
Append data to the end of the stream. The pointer will not move if
this operation is successful.
@param data: The data to append to the stream.
@type data: C{str} or C{unicode}
@raise TypeError: data is not C{str} or C{unicode}
]
variable[t] assign[=] call[name[self].tell, parameter[]]
call[name[self].seek, parameter[constant[0], constant[2]]]
if call[name[hasattr], parameter[name[data], constant[getvalue]]] begin[:]
call[name[self].write_utf8_string, parameter[call[name[data].getvalue, parameter[]]]]
call[name[self].seek, parameter[name[t]]] | keyword[def] identifier[append] ( identifier[self] , identifier[data] ):
literal[string]
identifier[t] = identifier[self] . identifier[tell] ()
identifier[self] . identifier[seek] ( literal[int] , literal[int] )
keyword[if] identifier[hasattr] ( identifier[data] , literal[string] ):
identifier[self] . identifier[write_utf8_string] ( identifier[data] . identifier[getvalue] ())
keyword[else] :
identifier[self] . identifier[write_utf8_string] ( identifier[data] )
identifier[self] . identifier[seek] ( identifier[t] ) | def append(self, data):
"""
Append data to the end of the stream. The pointer will not move if
this operation is successful.
@param data: The data to append to the stream.
@type data: C{str} or C{unicode}
@raise TypeError: data is not C{str} or C{unicode}
"""
t = self.tell()
# seek to the end of the stream
self.seek(0, 2)
if hasattr(data, 'getvalue'):
self.write_utf8_string(data.getvalue()) # depends on [control=['if'], data=[]]
else:
self.write_utf8_string(data)
self.seek(t) |
def _send_resolve_request(self):
"sends RESOLVE_PTR request (Tor custom)"
host = self._addr.host.encode()
self._data_to_send(
struct.pack(
'!BBBBB{}sH'.format(len(host)),
5, # version
0xF0, # command
0x00, # reserved
0x03, # DOMAINNAME
len(host),
host,
0, # self._addr.port?
)
) | def function[_send_resolve_request, parameter[self]]:
constant[sends RESOLVE_PTR request (Tor custom)]
variable[host] assign[=] call[name[self]._addr.host.encode, parameter[]]
call[name[self]._data_to_send, parameter[call[name[struct].pack, parameter[call[constant[!BBBBB{}sH].format, parameter[call[name[len], parameter[name[host]]]]], constant[5], constant[240], constant[0], constant[3], call[name[len], parameter[name[host]]], name[host], constant[0]]]]] | keyword[def] identifier[_send_resolve_request] ( identifier[self] ):
literal[string]
identifier[host] = identifier[self] . identifier[_addr] . identifier[host] . identifier[encode] ()
identifier[self] . identifier[_data_to_send] (
identifier[struct] . identifier[pack] (
literal[string] . identifier[format] ( identifier[len] ( identifier[host] )),
literal[int] ,
literal[int] ,
literal[int] ,
literal[int] ,
identifier[len] ( identifier[host] ),
identifier[host] ,
literal[int] ,
)
) | def _send_resolve_request(self):
"""sends RESOLVE_PTR request (Tor custom)"""
host = self._addr.host.encode() # version
# command
# reserved
# DOMAINNAME
# self._addr.port?
self._data_to_send(struct.pack('!BBBBB{}sH'.format(len(host)), 5, 240, 0, 3, len(host), host, 0)) |
def fill_row(self, forward, items, idx, row, ro, ri, overlap,lengths):
    """Fill `row` with tokens taken from the ragged array `items`.

    --OBS-- overlap != 1 has not been implemented.

    :param forward: when True copy rag slices left-to-right; otherwise
        copy slices that end at `ri`, reversed.
    :param items: ragged array of token sequences (sliceable).
    :param idx: ordering over `items`; `idx[ro]` selects the next rag.
    :param row: pre-allocated output buffer, filled completely in place
        (uses `row.size` and slice assignment — presumably a numpy
        array; TODO confirm).
    :param ro: rag offset — index into `idx` of the rag being consumed.
    :param ri: read index inside the current rag.
    :param overlap: number of tokens shared between consecutive rows.
    :param lengths: per-rag sequence lengths, indexed like `items`.
    :returns: updated `(ro, ri)` state to resume from on the next row.
    """
    ibuf = n = 0  # ibuf: write position inside `row`; n: size of the last chunk copied
    ro -= 1  # pre-decrement so the loop can increment at the top of each pass
    while ibuf < row.size:
        ro += 1
        ix = idx[ro]
        rag = items[ix]
        if forward:
            # After the first chunk, each new rag is read from its start.
            ri = 0 if ibuf else ri
            n = min(lengths[ix] - ri, row.size - ibuf)
            row[ibuf:ibuf+n] = rag[ri:ri+n]
        else:
            # Backward: after the first chunk, start from the rag's end and
            # copy the chunk reversed.
            ri = lengths[ix] if ibuf else ri
            n = min(ri, row.size - ibuf)
            row[ibuf:ibuf+n] = rag[ri-n:ri][::-1]
        ibuf += n
    return ro, ri + ((n-overlap) if forward else -(n-overlap))
constant[Fill the row with tokens from the ragged array. --OBS-- overlap != 1 has not been implemented]
variable[ibuf] assign[=] constant[0]
<ast.AugAssign object at 0x7da1b1df8190>
while compare[name[ibuf] less[<] name[row].size] begin[:]
<ast.AugAssign object at 0x7da1b1df8ac0>
variable[ix] assign[=] call[name[idx]][name[ro]]
variable[rag] assign[=] call[name[items]][name[ix]]
if name[forward] begin[:]
variable[ri] assign[=] <ast.IfExp object at 0x7da20e9b01c0>
variable[n] assign[=] call[name[min], parameter[binary_operation[call[name[lengths]][name[ix]] - name[ri]], binary_operation[name[row].size - name[ibuf]]]]
call[name[row]][<ast.Slice object at 0x7da20e9b2f50>] assign[=] call[name[rag]][<ast.Slice object at 0x7da20e9b34c0>]
<ast.AugAssign object at 0x7da20e9b0970>
return[tuple[[<ast.Name object at 0x7da20e9b1210>, <ast.BinOp object at 0x7da20e9b3550>]]] | keyword[def] identifier[fill_row] ( identifier[self] , identifier[forward] , identifier[items] , identifier[idx] , identifier[row] , identifier[ro] , identifier[ri] , identifier[overlap] , identifier[lengths] ):
literal[string]
identifier[ibuf] = identifier[n] = literal[int]
identifier[ro] -= literal[int]
keyword[while] identifier[ibuf] < identifier[row] . identifier[size] :
identifier[ro] += literal[int]
identifier[ix] = identifier[idx] [ identifier[ro] ]
identifier[rag] = identifier[items] [ identifier[ix] ]
keyword[if] identifier[forward] :
identifier[ri] = literal[int] keyword[if] identifier[ibuf] keyword[else] identifier[ri]
identifier[n] = identifier[min] ( identifier[lengths] [ identifier[ix] ]- identifier[ri] , identifier[row] . identifier[size] - identifier[ibuf] )
identifier[row] [ identifier[ibuf] : identifier[ibuf] + identifier[n] ]= identifier[rag] [ identifier[ri] : identifier[ri] + identifier[n] ]
keyword[else] :
identifier[ri] = identifier[lengths] [ identifier[ix] ] keyword[if] identifier[ibuf] keyword[else] identifier[ri]
identifier[n] = identifier[min] ( identifier[ri] , identifier[row] . identifier[size] - identifier[ibuf] )
identifier[row] [ identifier[ibuf] : identifier[ibuf] + identifier[n] ]= identifier[rag] [ identifier[ri] - identifier[n] : identifier[ri] ][::- literal[int] ]
identifier[ibuf] += identifier[n]
keyword[return] identifier[ro] , identifier[ri] +(( identifier[n] - identifier[overlap] ) keyword[if] identifier[forward] keyword[else] -( identifier[n] - identifier[overlap] )) | def fill_row(self, forward, items, idx, row, ro, ri, overlap, lengths):
"""Fill the row with tokens from the ragged array. --OBS-- overlap != 1 has not been implemented"""
ibuf = n = 0
ro -= 1
while ibuf < row.size:
ro += 1
ix = idx[ro]
rag = items[ix]
if forward:
ri = 0 if ibuf else ri
n = min(lengths[ix] - ri, row.size - ibuf)
row[ibuf:ibuf + n] = rag[ri:ri + n] # depends on [control=['if'], data=[]]
else:
ri = lengths[ix] if ibuf else ri
n = min(ri, row.size - ibuf)
row[ibuf:ibuf + n] = rag[ri - n:ri][::-1]
ibuf += n # depends on [control=['while'], data=['ibuf']]
return (ro, ri + (n - overlap if forward else -(n - overlap))) |
def browse(self, ms_item=None):
    """Return the sub-elements of item or of the root if item is None

    :param ms_item: Instance of sub-class of
        :py:class:`soco.data_structures.MusicServiceItem`. This object must
        have item_id, service_id and extended_id properties

    :returns: dict with keys ``'index'``, ``'count'``, ``'total'`` and
        ``'item_list'`` (a list of music service items)

    Note:
        Browsing a MSTrack item will return itself.

    Note:
        This plugin cannot yet set the parent ID of the results
        correctly when browsing
        :py:class:`soco.data_structures.MSFavorites` and
        :py:class:`soco.data_structures.MSCollection` elements.
    """
    # Reject items that belong to a different music service
    if ms_item is not None and ms_item.service_id != self._service_id:
        message = 'This music service item is not for this service'
        raise ValueError(message)
    # Form HTTP body and set parent_id
    if ms_item:
        body = self._browse_body(ms_item.item_id)
        parent_id = ms_item.extended_id
        if parent_id is None:
            parent_id = ''
    else:
        # No item given: browse from the service root
        body = self._browse_body('root')
        parent_id = '0'
    # Get HTTP header and post the request
    headers = _get_header('get_metadata')
    response = _post(self._url, headers, body, **self._http_vars)
    # Check for errors and parse the response XML
    self._check_for_errors(response)
    result_dom = XML.fromstring(really_utf8(response.text))
    # Find the getMetadataResult item ...
    xpath_search = './/' + _ns_tag('', 'getMetadataResult')
    metadata_result = list(result_dom.findall(xpath_search))
    # ... and make sure there is exactly 1
    if len(metadata_result) != 1:
        raise UnknownXMLStructure(
            'The results XML has more than 1 \'getMetadataResult\'. This '
            'is unexpected and parsing will dis-continue.'
        )
    metadata_result = metadata_result[0]
    # Collect the paging fields of the metadata result ...
    out = {'item_list': []}
    for element in ['index', 'count', 'total']:
        out[element] = metadata_result.findtext(_ns_tag('', element))
    # ... then convert each media child element into a music service item
    for result in metadata_result:
        if result.tag in [_ns_tag('', 'mediaCollection'),
                          _ns_tag('', 'mediaMetadata')]:
            out['item_list'].append(get_ms_item(result, self, parent_id))
    return out
constant[Return the sub-elements of item or of the root if item is None
:param item: Instance of sub-class of
:py:class:`soco.data_structures.MusicServiceItem`. This object must
have item_id, service_id and extended_id properties
Note:
Browsing a MSTrack item will return itself.
Note:
This plugin cannot yet set the parent ID of the results
correctly when browsing
:py:class:`soco.data_structures.MSFavorites` and
:py:class:`soco.data_structures.MSCollection` elements.
]
if <ast.BoolOp object at 0x7da18ede71c0> begin[:]
variable[message] assign[=] constant[This music service item is not for this service]
<ast.Raise object at 0x7da18ede42e0>
if name[ms_item] begin[:]
variable[body] assign[=] call[name[self]._browse_body, parameter[name[ms_item].item_id]]
variable[parent_id] assign[=] name[ms_item].extended_id
if compare[name[parent_id] is constant[None]] begin[:]
variable[parent_id] assign[=] constant[]
variable[headers] assign[=] call[name[_get_header], parameter[constant[get_metadata]]]
variable[response] assign[=] call[name[_post], parameter[name[self]._url, name[headers], name[body]]]
call[name[self]._check_for_errors, parameter[name[response]]]
variable[result_dom] assign[=] call[name[XML].fromstring, parameter[call[name[really_utf8], parameter[name[response].text]]]]
variable[xpath_search] assign[=] binary_operation[constant[.//] + call[name[_ns_tag], parameter[constant[], constant[getMetadataResult]]]]
variable[metadata_result] assign[=] call[name[list], parameter[call[name[result_dom].findall, parameter[name[xpath_search]]]]]
if compare[call[name[len], parameter[name[metadata_result]]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da18ede6ce0>
variable[metadata_result] assign[=] call[name[metadata_result]][constant[0]]
variable[out] assign[=] dictionary[[<ast.Constant object at 0x7da18ede4610>], [<ast.List object at 0x7da18ede6350>]]
for taget[name[element]] in starred[list[[<ast.Constant object at 0x7da18ede6530>, <ast.Constant object at 0x7da18ede4250>, <ast.Constant object at 0x7da18ede6800>]]] begin[:]
call[name[out]][name[element]] assign[=] call[name[metadata_result].findtext, parameter[call[name[_ns_tag], parameter[constant[], name[element]]]]]
for taget[name[result]] in starred[name[metadata_result]] begin[:]
if compare[name[result].tag in list[[<ast.Call object at 0x7da18ede52a0>, <ast.Call object at 0x7da18ede65c0>]]] begin[:]
call[call[name[out]][constant[item_list]].append, parameter[call[name[get_ms_item], parameter[name[result], name[self], name[parent_id]]]]]
return[name[out]] | keyword[def] identifier[browse] ( identifier[self] , identifier[ms_item] = keyword[None] ):
literal[string]
keyword[if] identifier[ms_item] keyword[is] keyword[not] keyword[None] keyword[and] identifier[ms_item] . identifier[service_id] != identifier[self] . identifier[_service_id] :
identifier[message] = literal[string]
keyword[raise] identifier[ValueError] ( identifier[message] )
keyword[if] identifier[ms_item] :
identifier[body] = identifier[self] . identifier[_browse_body] ( identifier[ms_item] . identifier[item_id] )
identifier[parent_id] = identifier[ms_item] . identifier[extended_id]
keyword[if] identifier[parent_id] keyword[is] keyword[None] :
identifier[parent_id] = literal[string]
keyword[else] :
identifier[body] = identifier[self] . identifier[_browse_body] ( literal[string] )
identifier[parent_id] = literal[string]
identifier[headers] = identifier[_get_header] ( literal[string] )
identifier[response] = identifier[_post] ( identifier[self] . identifier[_url] , identifier[headers] , identifier[body] ,** identifier[self] . identifier[_http_vars] )
identifier[self] . identifier[_check_for_errors] ( identifier[response] )
identifier[result_dom] = identifier[XML] . identifier[fromstring] ( identifier[really_utf8] ( identifier[response] . identifier[text] ))
identifier[xpath_search] = literal[string] + identifier[_ns_tag] ( literal[string] , literal[string] )
identifier[metadata_result] = identifier[list] ( identifier[result_dom] . identifier[findall] ( identifier[xpath_search] ))
keyword[if] identifier[len] ( identifier[metadata_result] )!= literal[int] :
keyword[raise] identifier[UnknownXMLStructure] (
literal[string]
literal[string]
)
identifier[metadata_result] = identifier[metadata_result] [ literal[int] ]
identifier[out] ={ literal[string] :[]}
keyword[for] identifier[element] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[out] [ identifier[element] ]= identifier[metadata_result] . identifier[findtext] ( identifier[_ns_tag] ( literal[string] , identifier[element] ))
keyword[for] identifier[result] keyword[in] identifier[metadata_result] :
keyword[if] identifier[result] . identifier[tag] keyword[in] [ identifier[_ns_tag] ( literal[string] , literal[string] ),
identifier[_ns_tag] ( literal[string] , literal[string] )]:
identifier[out] [ literal[string] ]. identifier[append] ( identifier[get_ms_item] ( identifier[result] , identifier[self] , identifier[parent_id] ))
keyword[return] identifier[out] | def browse(self, ms_item=None):
"""Return the sub-elements of item or of the root if item is None
:param item: Instance of sub-class of
:py:class:`soco.data_structures.MusicServiceItem`. This object must
have item_id, service_id and extended_id properties
Note:
Browsing a MSTrack item will return itself.
Note:
This plugin cannot yet set the parent ID of the results
correctly when browsing
:py:class:`soco.data_structures.MSFavorites` and
:py:class:`soco.data_structures.MSCollection` elements.
"""
# Check for correct service
if ms_item is not None and ms_item.service_id != self._service_id:
message = 'This music service item is not for this service'
raise ValueError(message) # depends on [control=['if'], data=[]]
# Form HTTP body and set parent_id
if ms_item:
body = self._browse_body(ms_item.item_id)
parent_id = ms_item.extended_id
if parent_id is None:
parent_id = '' # depends on [control=['if'], data=['parent_id']] # depends on [control=['if'], data=[]]
else:
body = self._browse_body('root')
parent_id = '0'
# Get HTTP header and post
headers = _get_header('get_metadata')
response = _post(self._url, headers, body, **self._http_vars)
# Check for errors and get XML
self._check_for_errors(response)
result_dom = XML.fromstring(really_utf8(response.text))
# Find the getMetadataResult item ...
xpath_search = './/' + _ns_tag('', 'getMetadataResult')
metadata_result = list(result_dom.findall(xpath_search))
# ... and make sure there is exactly 1
if len(metadata_result) != 1:
raise UnknownXMLStructure("The results XML has more than 1 'getMetadataResult'. This is unexpected and parsing will dis-continue.") # depends on [control=['if'], data=[]]
metadata_result = metadata_result[0]
# Browse the children of metadata result
out = {'item_list': []}
for element in ['index', 'count', 'total']:
out[element] = metadata_result.findtext(_ns_tag('', element)) # depends on [control=['for'], data=['element']]
for result in metadata_result:
if result.tag in [_ns_tag('', 'mediaCollection'), _ns_tag('', 'mediaMetadata')]:
out['item_list'].append(get_ms_item(result, self, parent_id)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['result']]
return out |
def _parse(self, source, accept_encoded_idn, only_icann=False):
    """PSL parser core.

    Populates ``self._publicsuffix`` (frozenset of rules) and
    ``self._maxlabel`` (maximum number of labels in any rule).

    :param source: the Public Suffix List, either as a decodable string
        or as an iterable of lines.
    :param accept_encoded_idn: also store the IDNA-encoded form of each
        rule (exception rules keep their leading "!").
    :param only_icann: when True, keep only rules between the
        "// ===BEGIN ICANN DOMAINS===" / "// ===END ICANN DOMAINS==="
        markers.
    """
    publicsuffix = set()
    maxlabel = 0
    section_is_icann = None
    if isinstance(source, decodablestr):
        source = source.splitlines()
    # NOTE: the original kept a line counter (`ln`) that was never read;
    # it has been removed as dead code.
    for line in source:
        if only_icann:
            ul = u(line).rstrip()
            if ul == "// ===BEGIN ICANN DOMAINS===":
                section_is_icann = True
                continue
            elif ul == "// ===END ICANN DOMAINS===":
                section_is_icann = False
                continue
            if not section_is_icann:
                continue
        # A rule is the first space-delimited token, lower-cased; blank
        # lines and "//" comment lines are skipped.
        s = u(line).lower().split(" ")[0].rstrip()
        if s == "" or s.startswith("//"):
            continue
        maxlabel = max(maxlabel, s.count(".") + 1)
        publicsuffix.add(s)
        if accept_encoded_idn:
            e = encode_idn(s.lstrip("!"))
            if s[0] == "!":
                publicsuffix.add("!" + e)
            else:
                publicsuffix.add(e)
    self._publicsuffix = frozenset(publicsuffix)
    self._maxlabel = maxlabel
constant[ PSL parser core ]
variable[publicsuffix] assign[=] call[name[set], parameter[]]
variable[maxlabel] assign[=] constant[0]
variable[section_is_icann] assign[=] constant[None]
if call[name[isinstance], parameter[name[source], name[decodablestr]]] begin[:]
variable[source] assign[=] call[name[source].splitlines, parameter[]]
variable[ln] assign[=] constant[0]
for taget[name[line]] in starred[name[source]] begin[:]
<ast.AugAssign object at 0x7da18f723100>
if name[only_icann] begin[:]
variable[ul] assign[=] call[call[name[u], parameter[name[line]]].rstrip, parameter[]]
if compare[name[ul] equal[==] constant[// ===BEGIN ICANN DOMAINS===]] begin[:]
variable[section_is_icann] assign[=] constant[True]
continue
if <ast.UnaryOp object at 0x7da20c7cb670> begin[:]
continue
variable[s] assign[=] call[call[call[call[call[name[u], parameter[name[line]]].lower, parameter[]].split, parameter[constant[ ]]]][constant[0]].rstrip, parameter[]]
if <ast.BoolOp object at 0x7da20c7c87c0> begin[:]
continue
variable[maxlabel] assign[=] call[name[max], parameter[name[maxlabel], binary_operation[call[name[s].count, parameter[constant[.]]] + constant[1]]]]
call[name[publicsuffix].add, parameter[name[s]]]
if name[accept_encoded_idn] begin[:]
variable[e] assign[=] call[name[encode_idn], parameter[call[name[s].lstrip, parameter[constant[!]]]]]
if compare[call[name[s]][constant[0]] equal[==] constant[!]] begin[:]
call[name[publicsuffix].add, parameter[binary_operation[constant[!] + name[e]]]]
name[self]._publicsuffix assign[=] call[name[frozenset], parameter[name[publicsuffix]]]
name[self]._maxlabel assign[=] name[maxlabel] | keyword[def] identifier[_parse] ( identifier[self] , identifier[source] , identifier[accept_encoded_idn] , identifier[only_icann] = keyword[False] ):
literal[string]
identifier[publicsuffix] = identifier[set] ()
identifier[maxlabel] = literal[int]
identifier[section_is_icann] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[source] , identifier[decodablestr] ):
identifier[source] = identifier[source] . identifier[splitlines] ()
identifier[ln] = literal[int]
keyword[for] identifier[line] keyword[in] identifier[source] :
identifier[ln] += literal[int]
keyword[if] identifier[only_icann] :
identifier[ul] = identifier[u] ( identifier[line] ). identifier[rstrip] ()
keyword[if] identifier[ul] == literal[string] :
identifier[section_is_icann] = keyword[True]
keyword[continue]
keyword[elif] identifier[ul] == literal[string] :
identifier[section_is_icann] = keyword[False]
keyword[continue]
keyword[if] keyword[not] identifier[section_is_icann] :
keyword[continue]
identifier[s] = identifier[u] ( identifier[line] ). identifier[lower] (). identifier[split] ( literal[string] )[ literal[int] ]. identifier[rstrip] ()
keyword[if] identifier[s] == literal[string] keyword[or] identifier[s] . identifier[startswith] ( literal[string] ):
keyword[continue]
identifier[maxlabel] = identifier[max] ( identifier[maxlabel] , identifier[s] . identifier[count] ( literal[string] )+ literal[int] )
identifier[publicsuffix] . identifier[add] ( identifier[s] )
keyword[if] identifier[accept_encoded_idn] :
identifier[e] = identifier[encode_idn] ( identifier[s] . identifier[lstrip] ( literal[string] ))
keyword[if] identifier[s] [ literal[int] ]== literal[string] :
identifier[publicsuffix] . identifier[add] ( literal[string] + identifier[e] )
keyword[else] :
identifier[publicsuffix] . identifier[add] ( identifier[e] )
identifier[self] . identifier[_publicsuffix] = identifier[frozenset] ( identifier[publicsuffix] )
identifier[self] . identifier[_maxlabel] = identifier[maxlabel] | def _parse(self, source, accept_encoded_idn, only_icann=False):
""" PSL parser core """
publicsuffix = set()
maxlabel = 0
section_is_icann = None
if isinstance(source, decodablestr):
source = source.splitlines() # depends on [control=['if'], data=[]]
ln = 0
for line in source:
ln += 1
if only_icann:
ul = u(line).rstrip()
if ul == '// ===BEGIN ICANN DOMAINS===':
section_is_icann = True
continue # depends on [control=['if'], data=[]]
elif ul == '// ===END ICANN DOMAINS===':
section_is_icann = False
continue # depends on [control=['if'], data=[]]
if not section_is_icann:
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
s = u(line).lower().split(' ')[0].rstrip()
if s == '' or s.startswith('//'):
continue # depends on [control=['if'], data=[]]
maxlabel = max(maxlabel, s.count('.') + 1)
publicsuffix.add(s)
if accept_encoded_idn:
e = encode_idn(s.lstrip('!'))
if s[0] == '!':
publicsuffix.add('!' + e) # depends on [control=['if'], data=[]]
else:
publicsuffix.add(e) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
self._publicsuffix = frozenset(publicsuffix)
self._maxlabel = maxlabel |
def loads(s, model=None, parser=None):
"""Deserialize s (a str) to a Python object."""
with StringIO(s) as f:
return load(f, model=model, parser=parser) | def function[loads, parameter[s, model, parser]]:
constant[Deserialize s (a str) to a Python object.]
with call[name[StringIO], parameter[name[s]]] begin[:]
return[call[name[load], parameter[name[f]]]] | keyword[def] identifier[loads] ( identifier[s] , identifier[model] = keyword[None] , identifier[parser] = keyword[None] ):
literal[string]
keyword[with] identifier[StringIO] ( identifier[s] ) keyword[as] identifier[f] :
keyword[return] identifier[load] ( identifier[f] , identifier[model] = identifier[model] , identifier[parser] = identifier[parser] ) | def loads(s, model=None, parser=None):
"""Deserialize s (a str) to a Python object."""
with StringIO(s) as f:
return load(f, model=model, parser=parser) # depends on [control=['with'], data=['f']] |
def execute_query(cmd, client, application, analytics_query, start_time=None, end_time=None, offset='1h', resource_group_name=None):
"""Executes a query against the provided Application Insights application."""
from .vendored_sdks.applicationinsights.models import QueryBody
targets = get_query_targets(cmd.cli_ctx, application, resource_group_name)
return client.query.execute(targets[0], QueryBody(query=analytics_query, timespan=get_timespan(cmd.cli_ctx, start_time, end_time, offset), applications=targets[1:])) | def function[execute_query, parameter[cmd, client, application, analytics_query, start_time, end_time, offset, resource_group_name]]:
constant[Executes a query against the provided Application Insights application.]
from relative_module[vendored_sdks.applicationinsights.models] import module[QueryBody]
variable[targets] assign[=] call[name[get_query_targets], parameter[name[cmd].cli_ctx, name[application], name[resource_group_name]]]
return[call[name[client].query.execute, parameter[call[name[targets]][constant[0]], call[name[QueryBody], parameter[]]]]] | keyword[def] identifier[execute_query] ( identifier[cmd] , identifier[client] , identifier[application] , identifier[analytics_query] , identifier[start_time] = keyword[None] , identifier[end_time] = keyword[None] , identifier[offset] = literal[string] , identifier[resource_group_name] = keyword[None] ):
literal[string]
keyword[from] . identifier[vendored_sdks] . identifier[applicationinsights] . identifier[models] keyword[import] identifier[QueryBody]
identifier[targets] = identifier[get_query_targets] ( identifier[cmd] . identifier[cli_ctx] , identifier[application] , identifier[resource_group_name] )
keyword[return] identifier[client] . identifier[query] . identifier[execute] ( identifier[targets] [ literal[int] ], identifier[QueryBody] ( identifier[query] = identifier[analytics_query] , identifier[timespan] = identifier[get_timespan] ( identifier[cmd] . identifier[cli_ctx] , identifier[start_time] , identifier[end_time] , identifier[offset] ), identifier[applications] = identifier[targets] [ literal[int] :])) | def execute_query(cmd, client, application, analytics_query, start_time=None, end_time=None, offset='1h', resource_group_name=None):
"""Executes a query against the provided Application Insights application."""
from .vendored_sdks.applicationinsights.models import QueryBody
targets = get_query_targets(cmd.cli_ctx, application, resource_group_name)
return client.query.execute(targets[0], QueryBody(query=analytics_query, timespan=get_timespan(cmd.cli_ctx, start_time, end_time, offset), applications=targets[1:])) |
def _get_nonlinear_site_term(self, C, vs30, pga_rock):
"""
Returns the nonlinear site scaling term (equation 7)
"""
v_s = np.copy(vs30)
v_s[vs30 > 760.] = 760.
# Nonlinear controlling parameter (equation 8)
f_2 = C["f4"] * (np.exp(C["f5"] * (v_s - 360.)) -
np.exp(C["f5"] * 400.))
fnl = self.CONSTS["f1"] + f_2 * np.log((pga_rock + self.CONSTS["f3"]) /
self.CONSTS["f3"])
return fnl | def function[_get_nonlinear_site_term, parameter[self, C, vs30, pga_rock]]:
constant[
Returns the nonlinear site scaling term (equation 7)
]
variable[v_s] assign[=] call[name[np].copy, parameter[name[vs30]]]
call[name[v_s]][compare[name[vs30] greater[>] constant[760.0]]] assign[=] constant[760.0]
variable[f_2] assign[=] binary_operation[call[name[C]][constant[f4]] * binary_operation[call[name[np].exp, parameter[binary_operation[call[name[C]][constant[f5]] * binary_operation[name[v_s] - constant[360.0]]]]] - call[name[np].exp, parameter[binary_operation[call[name[C]][constant[f5]] * constant[400.0]]]]]]
variable[fnl] assign[=] binary_operation[call[name[self].CONSTS][constant[f1]] + binary_operation[name[f_2] * call[name[np].log, parameter[binary_operation[binary_operation[name[pga_rock] + call[name[self].CONSTS][constant[f3]]] / call[name[self].CONSTS][constant[f3]]]]]]]
return[name[fnl]] | keyword[def] identifier[_get_nonlinear_site_term] ( identifier[self] , identifier[C] , identifier[vs30] , identifier[pga_rock] ):
literal[string]
identifier[v_s] = identifier[np] . identifier[copy] ( identifier[vs30] )
identifier[v_s] [ identifier[vs30] > literal[int] ]= literal[int]
identifier[f_2] = identifier[C] [ literal[string] ]*( identifier[np] . identifier[exp] ( identifier[C] [ literal[string] ]*( identifier[v_s] - literal[int] ))-
identifier[np] . identifier[exp] ( identifier[C] [ literal[string] ]* literal[int] ))
identifier[fnl] = identifier[self] . identifier[CONSTS] [ literal[string] ]+ identifier[f_2] * identifier[np] . identifier[log] (( identifier[pga_rock] + identifier[self] . identifier[CONSTS] [ literal[string] ])/
identifier[self] . identifier[CONSTS] [ literal[string] ])
keyword[return] identifier[fnl] | def _get_nonlinear_site_term(self, C, vs30, pga_rock):
"""
Returns the nonlinear site scaling term (equation 7)
"""
v_s = np.copy(vs30)
v_s[vs30 > 760.0] = 760.0
# Nonlinear controlling parameter (equation 8)
f_2 = C['f4'] * (np.exp(C['f5'] * (v_s - 360.0)) - np.exp(C['f5'] * 400.0))
fnl = self.CONSTS['f1'] + f_2 * np.log((pga_rock + self.CONSTS['f3']) / self.CONSTS['f3'])
return fnl |
def load_chunk(filename, bounds, encoding='utf8', slow=False):
"""
Load a chunk from file using Bounds info.
Pass 'slow=True' for an alternative loading method based on line numbers.
"""
if slow:
return _load_chunk_slow(filename, bounds, encoding)
with open(filename, 'rb') as f:
f.seek(bounds.byte_start)
size = bounds.byte_end - bounds.byte_start
return f.read(size).decode(encoding) | def function[load_chunk, parameter[filename, bounds, encoding, slow]]:
constant[
Load a chunk from file using Bounds info.
Pass 'slow=True' for an alternative loading method based on line numbers.
]
if name[slow] begin[:]
return[call[name[_load_chunk_slow], parameter[name[filename], name[bounds], name[encoding]]]]
with call[name[open], parameter[name[filename], constant[rb]]] begin[:]
call[name[f].seek, parameter[name[bounds].byte_start]]
variable[size] assign[=] binary_operation[name[bounds].byte_end - name[bounds].byte_start]
return[call[call[name[f].read, parameter[name[size]]].decode, parameter[name[encoding]]]] | keyword[def] identifier[load_chunk] ( identifier[filename] , identifier[bounds] , identifier[encoding] = literal[string] , identifier[slow] = keyword[False] ):
literal[string]
keyword[if] identifier[slow] :
keyword[return] identifier[_load_chunk_slow] ( identifier[filename] , identifier[bounds] , identifier[encoding] )
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[seek] ( identifier[bounds] . identifier[byte_start] )
identifier[size] = identifier[bounds] . identifier[byte_end] - identifier[bounds] . identifier[byte_start]
keyword[return] identifier[f] . identifier[read] ( identifier[size] ). identifier[decode] ( identifier[encoding] ) | def load_chunk(filename, bounds, encoding='utf8', slow=False):
"""
Load a chunk from file using Bounds info.
Pass 'slow=True' for an alternative loading method based on line numbers.
"""
if slow:
return _load_chunk_slow(filename, bounds, encoding) # depends on [control=['if'], data=[]]
with open(filename, 'rb') as f:
f.seek(bounds.byte_start)
size = bounds.byte_end - bounds.byte_start
return f.read(size).decode(encoding) # depends on [control=['with'], data=['f']] |
def get_airport_metars(self, iata, page=1, limit=100):
"""Retrieve the metar data at the current time
Given the IATA code of an airport, this method returns the metar information.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
The metar data for the airport
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_metars('HYD')
"""
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
w = self._fr24.get_airport_weather(url)
return w['metar'] | def function[get_airport_metars, parameter[self, iata, page, limit]]:
constant[Retrieve the metar data at the current time
Given the IATA code of an airport, this method returns the metar information.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
The metar data for the airport
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_metars('HYD')
]
variable[url] assign[=] call[name[AIRPORT_DATA_BASE].format, parameter[name[iata], call[name[str], parameter[name[self].AUTH_TOKEN]], name[page], name[limit]]]
variable[w] assign[=] call[name[self]._fr24.get_airport_weather, parameter[name[url]]]
return[call[name[w]][constant[metar]]] | keyword[def] identifier[get_airport_metars] ( identifier[self] , identifier[iata] , identifier[page] = literal[int] , identifier[limit] = literal[int] ):
literal[string]
identifier[url] = identifier[AIRPORT_DATA_BASE] . identifier[format] ( identifier[iata] , identifier[str] ( identifier[self] . identifier[AUTH_TOKEN] ), identifier[page] , identifier[limit] )
identifier[w] = identifier[self] . identifier[_fr24] . identifier[get_airport_weather] ( identifier[url] )
keyword[return] identifier[w] [ literal[string] ] | def get_airport_metars(self, iata, page=1, limit=100):
"""Retrieve the metar data at the current time
Given the IATA code of an airport, this method returns the metar information.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
The metar data for the airport
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_metars('HYD')
"""
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
w = self._fr24.get_airport_weather(url)
return w['metar'] |
def _parse(self, infile):
"""Actually parse the config file."""
temp_list_values = self.list_values
if self.unrepr:
self.list_values = False
comment_list = []
done_start = False
this_section = self
maxline = len(infile) - 1
cur_index = -1
reset_comment = False
while cur_index < maxline:
if reset_comment:
comment_list = []
cur_index += 1
line = infile[cur_index]
sline = line.strip()
# do we have anything on the line ?
if not sline or sline.startswith('#'):
reset_comment = False
comment_list.append(line)
continue
if not done_start:
# preserve initial comment
self.initial_comment = comment_list
comment_list = []
done_start = True
reset_comment = True
# first we check if it's a section marker
mat = self._sectionmarker.match(line)
if mat is not None:
# is a section line
(indent, sect_open, sect_name, sect_close, comment) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
cur_depth = sect_open.count('[')
if cur_depth != sect_close.count(']'):
self._handle_error("Cannot compute the section depth at line %s.",
NestingError, infile, cur_index)
continue
if cur_depth < this_section.depth:
# the new section is dropping back to a previous level
try:
parent = self._match_depth(this_section,
cur_depth).parent
except SyntaxError:
self._handle_error("Cannot compute nesting level at line %s.",
NestingError, infile, cur_index)
continue
elif cur_depth == this_section.depth:
# the new section is a sibling of the current section
parent = this_section.parent
elif cur_depth == this_section.depth + 1:
# the new section is a child the current section
parent = this_section
else:
self._handle_error("Section too nested at line %s.",
NestingError, infile, cur_index)
sect_name = self._unquote(sect_name)
if sect_name in parent:
self._handle_error('Duplicate section name at line %s.',
DuplicateError, infile, cur_index)
continue
# create the new section
this_section = Section(
parent,
cur_depth,
self,
name=sect_name)
parent[sect_name] = this_section
parent.inline_comments[sect_name] = comment
parent.comments[sect_name] = comment_list
continue
#
# it's not a section marker,
# so it should be a valid ``key = value`` line
mat = self._keyword.match(line)
if mat is None:
# it neither matched as a keyword
# or a section marker
self._handle_error(
'Invalid line at line "%s".',
ParseError, infile, cur_index)
else:
# is a keyword value
# value will include any inline comment
(indent, key, value) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
# check for a multiline value
if value[:3] in ['"""', "'''"]:
try:
value, comment, cur_index = self._multiline(
value, infile, cur_index, maxline)
except SyntaxError:
self._handle_error(
'Parse error in value at line %s.',
ParseError, infile, cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if type(e) == UnknownType:
msg = 'Unknown name or type in value at line %s.'
else:
msg = 'Parse error in value at line %s.'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if isinstance(e, UnknownType):
msg = 'Unknown name or type in value at line %s.'
else:
msg = 'Parse error in value at line %s.'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
# extract comment and lists
try:
(value, comment) = self._handle_value(value)
except SyntaxError:
self._handle_error(
'Parse error in value at line %s.',
ParseError, infile, cur_index)
continue
#
key = self._unquote(key)
if key in this_section:
self._handle_error(
'Duplicate keyword name at line %s.',
DuplicateError, infile, cur_index)
continue
# add the key.
# we set unrepr because if we have got this far we will never
# be creating a new section
this_section.__setitem__(key, value, unrepr=True)
this_section.inline_comments[key] = comment
this_section.comments[key] = comment_list
continue
#
if self.indent_type is None:
# no indentation used, set the type accordingly
self.indent_type = ''
# preserve the final comment
if not self and not self.initial_comment:
self.initial_comment = comment_list
elif not reset_comment:
self.final_comment = comment_list
self.list_values = temp_list_values | def function[_parse, parameter[self, infile]]:
constant[Actually parse the config file.]
variable[temp_list_values] assign[=] name[self].list_values
if name[self].unrepr begin[:]
name[self].list_values assign[=] constant[False]
variable[comment_list] assign[=] list[[]]
variable[done_start] assign[=] constant[False]
variable[this_section] assign[=] name[self]
variable[maxline] assign[=] binary_operation[call[name[len], parameter[name[infile]]] - constant[1]]
variable[cur_index] assign[=] <ast.UnaryOp object at 0x7da1b0edf070>
variable[reset_comment] assign[=] constant[False]
while compare[name[cur_index] less[<] name[maxline]] begin[:]
if name[reset_comment] begin[:]
variable[comment_list] assign[=] list[[]]
<ast.AugAssign object at 0x7da1b0edf5e0>
variable[line] assign[=] call[name[infile]][name[cur_index]]
variable[sline] assign[=] call[name[line].strip, parameter[]]
if <ast.BoolOp object at 0x7da1b0edf610> begin[:]
variable[reset_comment] assign[=] constant[False]
call[name[comment_list].append, parameter[name[line]]]
continue
if <ast.UnaryOp object at 0x7da1b0edf3a0> begin[:]
name[self].initial_comment assign[=] name[comment_list]
variable[comment_list] assign[=] list[[]]
variable[done_start] assign[=] constant[True]
variable[reset_comment] assign[=] constant[True]
variable[mat] assign[=] call[name[self]._sectionmarker.match, parameter[name[line]]]
if compare[name[mat] is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da1b0edfd00> assign[=] call[name[mat].groups, parameter[]]
if <ast.BoolOp object at 0x7da1b0edc9a0> begin[:]
name[self].indent_type assign[=] name[indent]
variable[cur_depth] assign[=] call[name[sect_open].count, parameter[constant[[]]]
if compare[name[cur_depth] not_equal[!=] call[name[sect_close].count, parameter[constant[]]]]] begin[:]
call[name[self]._handle_error, parameter[constant[Cannot compute the section depth at line %s.], name[NestingError], name[infile], name[cur_index]]]
continue
if compare[name[cur_depth] less[<] name[this_section].depth] begin[:]
<ast.Try object at 0x7da1b0f50220>
variable[sect_name] assign[=] call[name[self]._unquote, parameter[name[sect_name]]]
if compare[name[sect_name] in name[parent]] begin[:]
call[name[self]._handle_error, parameter[constant[Duplicate section name at line %s.], name[DuplicateError], name[infile], name[cur_index]]]
continue
variable[this_section] assign[=] call[name[Section], parameter[name[parent], name[cur_depth], name[self]]]
call[name[parent]][name[sect_name]] assign[=] name[this_section]
call[name[parent].inline_comments][name[sect_name]] assign[=] name[comment]
call[name[parent].comments][name[sect_name]] assign[=] name[comment_list]
continue
variable[mat] assign[=] call[name[self]._keyword.match, parameter[name[line]]]
if compare[name[mat] is constant[None]] begin[:]
call[name[self]._handle_error, parameter[constant[Invalid line at line "%s".], name[ParseError], name[infile], name[cur_index]]]
if compare[name[self].indent_type is constant[None]] begin[:]
name[self].indent_type assign[=] constant[]
if <ast.BoolOp object at 0x7da1b0f0d2d0> begin[:]
name[self].initial_comment assign[=] name[comment_list]
name[self].list_values assign[=] name[temp_list_values] | keyword[def] identifier[_parse] ( identifier[self] , identifier[infile] ):
literal[string]
identifier[temp_list_values] = identifier[self] . identifier[list_values]
keyword[if] identifier[self] . identifier[unrepr] :
identifier[self] . identifier[list_values] = keyword[False]
identifier[comment_list] =[]
identifier[done_start] = keyword[False]
identifier[this_section] = identifier[self]
identifier[maxline] = identifier[len] ( identifier[infile] )- literal[int]
identifier[cur_index] =- literal[int]
identifier[reset_comment] = keyword[False]
keyword[while] identifier[cur_index] < identifier[maxline] :
keyword[if] identifier[reset_comment] :
identifier[comment_list] =[]
identifier[cur_index] += literal[int]
identifier[line] = identifier[infile] [ identifier[cur_index] ]
identifier[sline] = identifier[line] . identifier[strip] ()
keyword[if] keyword[not] identifier[sline] keyword[or] identifier[sline] . identifier[startswith] ( literal[string] ):
identifier[reset_comment] = keyword[False]
identifier[comment_list] . identifier[append] ( identifier[line] )
keyword[continue]
keyword[if] keyword[not] identifier[done_start] :
identifier[self] . identifier[initial_comment] = identifier[comment_list]
identifier[comment_list] =[]
identifier[done_start] = keyword[True]
identifier[reset_comment] = keyword[True]
identifier[mat] = identifier[self] . identifier[_sectionmarker] . identifier[match] ( identifier[line] )
keyword[if] identifier[mat] keyword[is] keyword[not] keyword[None] :
( identifier[indent] , identifier[sect_open] , identifier[sect_name] , identifier[sect_close] , identifier[comment] )= identifier[mat] . identifier[groups] ()
keyword[if] identifier[indent] keyword[and] ( identifier[self] . identifier[indent_type] keyword[is] keyword[None] ):
identifier[self] . identifier[indent_type] = identifier[indent]
identifier[cur_depth] = identifier[sect_open] . identifier[count] ( literal[string] )
keyword[if] identifier[cur_depth] != identifier[sect_close] . identifier[count] ( literal[string] ):
identifier[self] . identifier[_handle_error] ( literal[string] ,
identifier[NestingError] , identifier[infile] , identifier[cur_index] )
keyword[continue]
keyword[if] identifier[cur_depth] < identifier[this_section] . identifier[depth] :
keyword[try] :
identifier[parent] = identifier[self] . identifier[_match_depth] ( identifier[this_section] ,
identifier[cur_depth] ). identifier[parent]
keyword[except] identifier[SyntaxError] :
identifier[self] . identifier[_handle_error] ( literal[string] ,
identifier[NestingError] , identifier[infile] , identifier[cur_index] )
keyword[continue]
keyword[elif] identifier[cur_depth] == identifier[this_section] . identifier[depth] :
identifier[parent] = identifier[this_section] . identifier[parent]
keyword[elif] identifier[cur_depth] == identifier[this_section] . identifier[depth] + literal[int] :
identifier[parent] = identifier[this_section]
keyword[else] :
identifier[self] . identifier[_handle_error] ( literal[string] ,
identifier[NestingError] , identifier[infile] , identifier[cur_index] )
identifier[sect_name] = identifier[self] . identifier[_unquote] ( identifier[sect_name] )
keyword[if] identifier[sect_name] keyword[in] identifier[parent] :
identifier[self] . identifier[_handle_error] ( literal[string] ,
identifier[DuplicateError] , identifier[infile] , identifier[cur_index] )
keyword[continue]
identifier[this_section] = identifier[Section] (
identifier[parent] ,
identifier[cur_depth] ,
identifier[self] ,
identifier[name] = identifier[sect_name] )
identifier[parent] [ identifier[sect_name] ]= identifier[this_section]
identifier[parent] . identifier[inline_comments] [ identifier[sect_name] ]= identifier[comment]
identifier[parent] . identifier[comments] [ identifier[sect_name] ]= identifier[comment_list]
keyword[continue]
identifier[mat] = identifier[self] . identifier[_keyword] . identifier[match] ( identifier[line] )
keyword[if] identifier[mat] keyword[is] keyword[None] :
identifier[self] . identifier[_handle_error] (
literal[string] ,
identifier[ParseError] , identifier[infile] , identifier[cur_index] )
keyword[else] :
( identifier[indent] , identifier[key] , identifier[value] )= identifier[mat] . identifier[groups] ()
keyword[if] identifier[indent] keyword[and] ( identifier[self] . identifier[indent_type] keyword[is] keyword[None] ):
identifier[self] . identifier[indent_type] = identifier[indent]
keyword[if] identifier[value] [: literal[int] ] keyword[in] [ literal[string] , literal[string] ]:
keyword[try] :
identifier[value] , identifier[comment] , identifier[cur_index] = identifier[self] . identifier[_multiline] (
identifier[value] , identifier[infile] , identifier[cur_index] , identifier[maxline] )
keyword[except] identifier[SyntaxError] :
identifier[self] . identifier[_handle_error] (
literal[string] ,
identifier[ParseError] , identifier[infile] , identifier[cur_index] )
keyword[continue]
keyword[else] :
keyword[if] identifier[self] . identifier[unrepr] :
identifier[comment] = literal[string]
keyword[try] :
identifier[value] = identifier[unrepr] ( identifier[value] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[if] identifier[type] ( identifier[e] )== identifier[UnknownType] :
identifier[msg] = literal[string]
keyword[else] :
identifier[msg] = literal[string]
identifier[self] . identifier[_handle_error] ( identifier[msg] , identifier[UnreprError] , identifier[infile] ,
identifier[cur_index] )
keyword[continue]
keyword[else] :
keyword[if] identifier[self] . identifier[unrepr] :
identifier[comment] = literal[string]
keyword[try] :
identifier[value] = identifier[unrepr] ( identifier[value] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[if] identifier[isinstance] ( identifier[e] , identifier[UnknownType] ):
identifier[msg] = literal[string]
keyword[else] :
identifier[msg] = literal[string]
identifier[self] . identifier[_handle_error] ( identifier[msg] , identifier[UnreprError] , identifier[infile] ,
identifier[cur_index] )
keyword[continue]
keyword[else] :
keyword[try] :
( identifier[value] , identifier[comment] )= identifier[self] . identifier[_handle_value] ( identifier[value] )
keyword[except] identifier[SyntaxError] :
identifier[self] . identifier[_handle_error] (
literal[string] ,
identifier[ParseError] , identifier[infile] , identifier[cur_index] )
keyword[continue]
identifier[key] = identifier[self] . identifier[_unquote] ( identifier[key] )
keyword[if] identifier[key] keyword[in] identifier[this_section] :
identifier[self] . identifier[_handle_error] (
literal[string] ,
identifier[DuplicateError] , identifier[infile] , identifier[cur_index] )
keyword[continue]
identifier[this_section] . identifier[__setitem__] ( identifier[key] , identifier[value] , identifier[unrepr] = keyword[True] )
identifier[this_section] . identifier[inline_comments] [ identifier[key] ]= identifier[comment]
identifier[this_section] . identifier[comments] [ identifier[key] ]= identifier[comment_list]
keyword[continue]
keyword[if] identifier[self] . identifier[indent_type] keyword[is] keyword[None] :
identifier[self] . identifier[indent_type] = literal[string]
keyword[if] keyword[not] identifier[self] keyword[and] keyword[not] identifier[self] . identifier[initial_comment] :
identifier[self] . identifier[initial_comment] = identifier[comment_list]
keyword[elif] keyword[not] identifier[reset_comment] :
identifier[self] . identifier[final_comment] = identifier[comment_list]
identifier[self] . identifier[list_values] = identifier[temp_list_values] | def _parse(self, infile):
"""Actually parse the config file."""
temp_list_values = self.list_values
if self.unrepr:
self.list_values = False # depends on [control=['if'], data=[]]
comment_list = []
done_start = False
this_section = self
maxline = len(infile) - 1
cur_index = -1
reset_comment = False
while cur_index < maxline:
if reset_comment:
comment_list = [] # depends on [control=['if'], data=[]]
cur_index += 1
line = infile[cur_index]
sline = line.strip()
# do we have anything on the line ?
if not sline or sline.startswith('#'):
reset_comment = False
comment_list.append(line)
continue # depends on [control=['if'], data=[]]
if not done_start:
# preserve initial comment
self.initial_comment = comment_list
comment_list = []
done_start = True # depends on [control=['if'], data=[]]
reset_comment = True
# first we check if it's a section marker
mat = self._sectionmarker.match(line)
if mat is not None:
# is a section line
(indent, sect_open, sect_name, sect_close, comment) = mat.groups()
if indent and self.indent_type is None:
self.indent_type = indent # depends on [control=['if'], data=[]]
cur_depth = sect_open.count('[')
if cur_depth != sect_close.count(']'):
self._handle_error('Cannot compute the section depth at line %s.', NestingError, infile, cur_index)
continue # depends on [control=['if'], data=[]]
if cur_depth < this_section.depth:
# the new section is dropping back to a previous level
try:
parent = self._match_depth(this_section, cur_depth).parent # depends on [control=['try'], data=[]]
except SyntaxError:
self._handle_error('Cannot compute nesting level at line %s.', NestingError, infile, cur_index)
continue # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['cur_depth']]
elif cur_depth == this_section.depth:
# the new section is a sibling of the current section
parent = this_section.parent # depends on [control=['if'], data=[]]
elif cur_depth == this_section.depth + 1:
# the new section is a child the current section
parent = this_section # depends on [control=['if'], data=[]]
else:
self._handle_error('Section too nested at line %s.', NestingError, infile, cur_index)
sect_name = self._unquote(sect_name)
if sect_name in parent:
self._handle_error('Duplicate section name at line %s.', DuplicateError, infile, cur_index)
continue # depends on [control=['if'], data=[]]
# create the new section
this_section = Section(parent, cur_depth, self, name=sect_name)
parent[sect_name] = this_section
parent.inline_comments[sect_name] = comment
parent.comments[sect_name] = comment_list
continue # depends on [control=['if'], data=['mat']]
#
# it's not a section marker,
# so it should be a valid ``key = value`` line
mat = self._keyword.match(line)
if mat is None:
# it neither matched as a keyword
# or a section marker
self._handle_error('Invalid line at line "%s".', ParseError, infile, cur_index) # depends on [control=['if'], data=[]]
else:
# is a keyword value
# value will include any inline comment
(indent, key, value) = mat.groups()
if indent and self.indent_type is None:
self.indent_type = indent # depends on [control=['if'], data=[]]
# check for a multiline value
if value[:3] in ['"""', "'''"]:
try:
(value, comment, cur_index) = self._multiline(value, infile, cur_index, maxline) # depends on [control=['try'], data=[]]
except SyntaxError:
self._handle_error('Parse error in value at line %s.', ParseError, infile, cur_index)
continue # depends on [control=['except'], data=[]]
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value) # depends on [control=['try'], data=[]]
except Exception as e:
if type(e) == UnknownType:
msg = 'Unknown name or type in value at line %s.' # depends on [control=['if'], data=[]]
else:
msg = 'Parse error in value at line %s.'
self._handle_error(msg, UnreprError, infile, cur_index)
continue # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif self.unrepr:
comment = ''
try:
value = unrepr(value) # depends on [control=['try'], data=[]]
except Exception as e:
if isinstance(e, UnknownType):
msg = 'Unknown name or type in value at line %s.' # depends on [control=['if'], data=[]]
else:
msg = 'Parse error in value at line %s.'
self._handle_error(msg, UnreprError, infile, cur_index)
continue # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
else:
# extract comment and lists
try:
(value, comment) = self._handle_value(value) # depends on [control=['try'], data=[]]
except SyntaxError:
self._handle_error('Parse error in value at line %s.', ParseError, infile, cur_index)
continue # depends on [control=['except'], data=[]]
#
key = self._unquote(key)
if key in this_section:
self._handle_error('Duplicate keyword name at line %s.', DuplicateError, infile, cur_index)
continue # depends on [control=['if'], data=[]]
# add the key.
# we set unrepr because if we have got this far we will never
# be creating a new section
this_section.__setitem__(key, value, unrepr=True)
this_section.inline_comments[key] = comment
this_section.comments[key] = comment_list
continue # depends on [control=['while'], data=['cur_index', 'maxline']]
#
if self.indent_type is None:
# no indentation used, set the type accordingly
self.indent_type = '' # depends on [control=['if'], data=[]]
# preserve the final comment
if not self and (not self.initial_comment):
self.initial_comment = comment_list # depends on [control=['if'], data=[]]
elif not reset_comment:
self.final_comment = comment_list # depends on [control=['if'], data=[]]
self.list_values = temp_list_values |
def main(args=None):
    """Extracts gene-level expression data from StringTie output.

    Sums transcript-level FPKM/TPM values per gene symbol (two passes over
    the StringTie GTF) and writes a tab-separated matrix with one row per
    gene: symbol, FPKM, TPM.

    Parameters
    ----------
    args: argparse.Namespace object, optional
        The argument values. If not specified, the values will be obtained by
        parsing the command line arguments using the `argparse` module.

    Returns
    -------
    int
        Exit code (0 if no error occurred).
    """
    if args is None:
        # parse command-line arguments
        parser = get_argument_parser()
        args = parser.parse_args()

    stringtie_file = args.stringtie_file
    gene_file = args.gene_file
    no_novel_transcripts = args.no_novel_transcripts
    output_file = args.output_file
    log_file = args.log_file
    quiet = args.quiet
    verbose = args.verbose

    logger = misc.get_logger(log_file=log_file, quiet=quiet, verbose=verbose)

    # read list of gene symbols
    logger.info('Reading gene data...')
    genes = misc.read_single(gene_file)

    # first pass over the StringTie GTF: map each StringTie gene ID to the
    # set of reference gene symbols carried by its transcripts
    logger.info('Parsing StringTie output...')
    logger.info('Associating StringTie gene IDs with gene symbols...')
    stringtie_genes = {}
    with open(stringtie_file) as fh:
        reader = csv.reader(fh, dialect='excel-tab')
        for row in reader:
            if row[0][0] == '#':
                # skip header / comment lines
                continue
            assert len(row) == 9
            if row[2] != 'transcript':
                # skip exon lines
                continue
            attr = parse_attributes(row[8])
            try:
                ref_gene = attr['ref_gene_name']
            except KeyError:
                # transcript carries no reference gene name
                continue
            stringtie_genes.setdefault(attr['gene_id'], set()).add(ref_gene)
    num_assoc = len(stringtie_genes)
    logger.info('Associated %d gene IDs with gene symbols.', num_assoc)

    gene_ids_ambiguous = [k for k, v in stringtie_genes.items()
                          if len(v) > 1]
    num_ambig = len(gene_ids_ambiguous)
    # guard against ZeroDivisionError when no gene IDs could be associated
    pct_ambig = 100 * (num_ambig / float(num_assoc)) if num_assoc else 0.0
    logger.info('%d / %d associated with multiple gene symbols (%.1f%%).',
                num_ambig, num_assoc, pct_ambig)

    # second pass: sum FPKM and TPM per gene
    n = len(genes)
    fpkm = np.zeros(n, dtype=np.float64)
    tpm = np.zeros(n, dtype=np.float64)
    fpkm_novel_gene = 0
    fpkm_unknown_gene_name = 0
    fpkm_novel_trans = 0
    fpkm_ambig = 0
    with open(stringtie_file) as fh:
        reader = csv.reader(fh, dialect='excel-tab')
        for row in reader:
            if row[0][0] == '#':
                # skip header
                continue
            assert len(row) == 9
            if row[2] != 'transcript':
                # skip exon lines
                continue
            attr = parse_attributes(row[8])
            f = float(attr['FPKM'])
            try:
                g = attr['ref_gene_name']
            except KeyError:
                if no_novel_transcripts:
                    # ignore this transcript
                    fpkm_novel_trans += f
                    continue
                # see if we can assign a gene name based on the gene ID
                try:
                    assoc = stringtie_genes[attr['gene_id']]
                except KeyError:
                    # gene_id not associated with any reference gene
                    fpkm_novel_gene += f
                    continue
                if len(assoc) > 1:
                    # gene ID associated with multiple ref. genes => ignored
                    fpkm_ambig += f
                    continue
                # gene ID associated with exactly one ref. gene
                g = next(iter(assoc))
            try:
                idx = misc.bisect_index(genes, g)
            except ValueError:
                fpkm_unknown_gene_name += f
                logger.warning('Unknown gene name: "%s".', g)
                continue
            fpkm[idx] += f
            tpm[idx] += float(attr['TPM'])

    if no_novel_transcripts:
        ignored_fpkm = fpkm_novel_trans + fpkm_unknown_gene_name
    else:
        ignored_fpkm = fpkm_novel_gene + fpkm_ambig + fpkm_unknown_gene_name
    total_fpkm = np.sum(fpkm) + ignored_fpkm
    # guard against ZeroDivisionError for inputs without any transcripts;
    # the conditional percentage logs below are safe because a positive
    # numerator implies total_fpkm > 0
    pct_ignored = 100 * (ignored_fpkm / total_fpkm) if total_fpkm > 0 else 0.0
    logger.info('Ignored %.1f / %.1f FPKM (%.1f%%)', ignored_fpkm,
                total_fpkm, pct_ignored)
    if no_novel_transcripts and fpkm_novel_trans > 0:
        logger.info('Ignored %.1f FPKM from novel transcripts (%.1f%%).',
                    fpkm_novel_trans, 100*(fpkm_novel_trans/total_fpkm))
    else:
        if fpkm_novel_gene > 0:
            logger.info('Ignored %.1f FPKM from transcripts of novel genes '
                        '(%.1f%%).',
                        fpkm_novel_gene, 100*(fpkm_novel_gene/total_fpkm))
        if fpkm_ambig > 0:
            logger.info('Ignored %.1f FPKM from transcripts with ambiguous '
                        'gene membership (%.1f%%).',
                        fpkm_ambig, 100*(fpkm_ambig/total_fpkm))
    if fpkm_unknown_gene_name > 0:
        logger.info('Ignored %.1f FPKM from transcripts of genes with unknown '
                    'names (%.1f%%).',
                    fpkm_unknown_gene_name,
                    100*(fpkm_unknown_gene_name/total_fpkm))

    # write output file: one row per gene (symbol, FPKM, TPM)
    E = np.c_[fpkm, tpm]
    with open(output_file, 'w') as ofh:
        writer = csv.writer(ofh, dialect='excel-tab',
                            lineterminator=os.linesep,
                            quoting=csv.QUOTE_NONE)
        for i, g in enumerate(genes):
            writer.writerow([g] + ['%.5f' % e for e in E[i, :]])
    return 0
constant[Extracts gene-level expression data from StringTie output.
Parameters
----------
args: argparse.Namespace object, optional
The argument values. If not specified, the values will be obtained by
parsing the command line arguments using the `argparse` module.
Returns
-------
int
Exit code (0 if no error occurred).
]
if compare[name[args] is constant[None]] begin[:]
variable[parser] assign[=] call[name[get_argument_parser], parameter[]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
variable[stringtie_file] assign[=] name[args].stringtie_file
variable[gene_file] assign[=] name[args].gene_file
variable[no_novel_transcripts] assign[=] name[args].no_novel_transcripts
variable[output_file] assign[=] name[args].output_file
variable[log_file] assign[=] name[args].log_file
variable[quiet] assign[=] name[args].quiet
variable[verbose] assign[=] name[args].verbose
variable[logger] assign[=] call[name[misc].get_logger, parameter[]]
call[name[logger].info, parameter[constant[Reading gene data...]]]
variable[genes] assign[=] call[name[misc].read_single, parameter[name[gene_file]]]
call[name[logger].info, parameter[constant[Parsing StringTie output...]]]
call[name[logger].info, parameter[constant[Associating StringTie gene IDs with gene symbols...]]]
variable[stringtie_genes] assign[=] dictionary[[], []]
with call[name[open], parameter[name[stringtie_file]]] begin[:]
variable[reader] assign[=] call[name[csv].reader, parameter[name[fh]]]
for taget[name[l]] in starred[name[reader]] begin[:]
if compare[call[call[name[l]][constant[0]]][constant[0]] equal[==] constant[#]] begin[:]
continue
assert[compare[call[name[len], parameter[name[l]]] equal[==] constant[9]]]
if compare[call[name[l]][constant[2]] not_equal[!=] constant[transcript]] begin[:]
continue
variable[attr] assign[=] call[name[parse_attributes], parameter[call[name[l]][constant[8]]]]
<ast.Try object at 0x7da1b0c9ce20>
call[name[logger].info, parameter[constant[Associated %d gene IDs with gene symbols.], call[name[len], parameter[name[stringtie_genes]]]]]
variable[gene_ids_ambiguous] assign[=] <ast.ListComp object at 0x7da1b0c9d6c0>
variable[n] assign[=] call[name[len], parameter[name[gene_ids_ambiguous]]]
call[name[logger].info, parameter[constant[%d / %d associated with multiple gene symbols (%.1f%%).], name[n], call[name[len], parameter[name[stringtie_genes]]], binary_operation[constant[100] * binary_operation[name[n] / call[name[float], parameter[call[name[len], parameter[name[stringtie_genes]]]]]]]]]
variable[n] assign[=] call[name[len], parameter[name[genes]]]
variable[fpkm] assign[=] call[name[np].zeros, parameter[name[n]]]
variable[tpm] assign[=] call[name[np].zeros, parameter[name[n]]]
variable[fpkm_novel_gene] assign[=] constant[0]
variable[fpkm_unknown_gene_name] assign[=] constant[0]
variable[fpkm_novel_trans] assign[=] constant[0]
variable[fpkm_ambig] assign[=] constant[0]
with call[name[open], parameter[name[stringtie_file]]] begin[:]
variable[reader] assign[=] call[name[csv].reader, parameter[name[fh]]]
for taget[name[l]] in starred[name[reader]] begin[:]
if compare[call[call[name[l]][constant[0]]][constant[0]] equal[==] constant[#]] begin[:]
continue
assert[compare[call[name[len], parameter[name[l]]] equal[==] constant[9]]]
if compare[call[name[l]][constant[2]] not_equal[!=] constant[transcript]] begin[:]
continue
variable[attr] assign[=] call[name[parse_attributes], parameter[call[name[l]][constant[8]]]]
variable[f] assign[=] call[name[float], parameter[call[name[attr]][constant[FPKM]]]]
<ast.Try object at 0x7da2041d8880>
<ast.Try object at 0x7da18f723760>
variable[t] assign[=] call[name[float], parameter[call[name[attr]][constant[TPM]]]]
<ast.AugAssign object at 0x7da18f722920>
<ast.AugAssign object at 0x7da18f721f90>
if name[no_novel_transcripts] begin[:]
variable[ignored_fpkm] assign[=] binary_operation[name[fpkm_novel_trans] + name[fpkm_unknown_gene_name]]
variable[total_fpkm] assign[=] binary_operation[call[name[np].sum, parameter[name[fpkm]]] + name[ignored_fpkm]]
call[name[logger].info, parameter[constant[Ignored %.1f / %.1f FPKM (%.1f%%)], name[ignored_fpkm], name[total_fpkm], binary_operation[constant[100] * binary_operation[name[ignored_fpkm] / name[total_fpkm]]]]]
if <ast.BoolOp object at 0x7da18f723280> begin[:]
call[name[logger].info, parameter[constant[Ignored %.1f FPKM from novel transcripts (%.1f%%).], name[fpkm_novel_trans], binary_operation[constant[100] * binary_operation[name[fpkm_novel_trans] / name[total_fpkm]]]]]
if compare[name[fpkm_unknown_gene_name] greater[>] constant[0]] begin[:]
call[name[logger].info, parameter[constant[Ignored %.1f FPKM from transcripts of genes with unknown names (%.1f%%).], name[fpkm_unknown_gene_name], binary_operation[constant[100] * binary_operation[name[fpkm_unknown_gene_name] / name[total_fpkm]]]]]
variable[E] assign[=] call[name[np].c_][tuple[[<ast.Name object at 0x7da18f721420>, <ast.Name object at 0x7da18f720b80>]]]
with call[name[open], parameter[name[output_file], constant[w]]] begin[:]
variable[writer] assign[=] call[name[csv].writer, parameter[name[ofh]]]
for taget[tuple[[<ast.Name object at 0x7da18f721720>, <ast.Name object at 0x7da18f7219c0>]]] in starred[call[name[enumerate], parameter[name[genes]]]] begin[:]
call[name[writer].writerow, parameter[binary_operation[list[[<ast.Name object at 0x7da18f7214e0>]] + <ast.ListComp object at 0x7da18f7234c0>]]]
return[constant[0]] | keyword[def] identifier[main] ( identifier[args] = keyword[None] ):
literal[string]
keyword[if] identifier[args] keyword[is] keyword[None] :
identifier[parser] = identifier[get_argument_parser] ()
identifier[args] = identifier[parser] . identifier[parse_args] ()
identifier[stringtie_file] = identifier[args] . identifier[stringtie_file]
identifier[gene_file] = identifier[args] . identifier[gene_file]
identifier[no_novel_transcripts] = identifier[args] . identifier[no_novel_transcripts]
identifier[output_file] = identifier[args] . identifier[output_file]
identifier[log_file] = identifier[args] . identifier[log_file]
identifier[quiet] = identifier[args] . identifier[quiet]
identifier[verbose] = identifier[args] . identifier[verbose]
identifier[logger] = identifier[misc] . identifier[get_logger] ( identifier[log_file] = identifier[log_file] , identifier[quiet] = identifier[quiet] , identifier[verbose] = identifier[verbose] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[genes] = identifier[misc] . identifier[read_single] ( identifier[gene_file] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[stringtie_genes] ={}
keyword[with] identifier[open] ( identifier[stringtie_file] ) keyword[as] identifier[fh] :
identifier[reader] = identifier[csv] . identifier[reader] ( identifier[fh] , identifier[dialect] = literal[string] )
keyword[for] identifier[l] keyword[in] identifier[reader] :
keyword[if] identifier[l] [ literal[int] ][ literal[int] ]== literal[string] :
keyword[continue]
keyword[assert] identifier[len] ( identifier[l] )== literal[int]
keyword[if] identifier[l] [ literal[int] ]!= literal[string] :
keyword[continue]
identifier[attr] = identifier[parse_attributes] ( identifier[l] [ literal[int] ])
keyword[try] :
identifier[ref_gene] = identifier[attr] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[continue]
keyword[else] :
keyword[try] :
identifier[g] = identifier[stringtie_genes] [ identifier[attr] [ literal[string] ]]
keyword[except] identifier[KeyError] :
identifier[stringtie_genes] [ identifier[attr] [ literal[string] ]]={ identifier[ref_gene] ,}
keyword[else] :
identifier[g] . identifier[add] ( identifier[ref_gene] )
identifier[logger] . identifier[info] ( literal[string] ,
identifier[len] ( identifier[stringtie_genes] ))
identifier[gene_ids_ambiguous] =[ identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[stringtie_genes] . identifier[items] ()
keyword[if] identifier[len] ( identifier[v] )> literal[int] ]
identifier[n] = identifier[len] ( identifier[gene_ids_ambiguous] )
identifier[logger] . identifier[info] ( literal[string] ,
identifier[n] , identifier[len] ( identifier[stringtie_genes] ), literal[int] *( identifier[n] / identifier[float] ( identifier[len] ( identifier[stringtie_genes] ))))
identifier[n] = identifier[len] ( identifier[genes] )
identifier[fpkm] = identifier[np] . identifier[zeros] ( identifier[n] , identifier[dtype] = identifier[np] . identifier[float64] )
identifier[tpm] = identifier[np] . identifier[zeros] ( identifier[n] , identifier[dtype] = identifier[np] . identifier[float64] )
identifier[fpkm_novel_gene] = literal[int]
identifier[fpkm_unknown_gene_name] = literal[int]
identifier[fpkm_novel_trans] = literal[int]
identifier[fpkm_ambig] = literal[int]
keyword[with] identifier[open] ( identifier[stringtie_file] ) keyword[as] identifier[fh] :
identifier[reader] = identifier[csv] . identifier[reader] ( identifier[fh] , identifier[dialect] = literal[string] )
keyword[for] identifier[l] keyword[in] identifier[reader] :
keyword[if] identifier[l] [ literal[int] ][ literal[int] ]== literal[string] :
keyword[continue]
keyword[assert] identifier[len] ( identifier[l] )== literal[int]
keyword[if] identifier[l] [ literal[int] ]!= literal[string] :
keyword[continue]
identifier[attr] = identifier[parse_attributes] ( identifier[l] [ literal[int] ])
identifier[f] = identifier[float] ( identifier[attr] [ literal[string] ])
keyword[try] :
identifier[g] = identifier[attr] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[if] identifier[no_novel_transcripts] :
identifier[fpkm_novel_trans] += identifier[f]
keyword[continue]
keyword[else] :
keyword[try] :
identifier[assoc] = identifier[stringtie_genes] [ identifier[attr] [ literal[string] ]]
keyword[except] identifier[KeyError] :
identifier[fpkm_novel_gene] += identifier[f]
keyword[continue]
keyword[else] :
keyword[if] identifier[len] ( identifier[assoc] )> literal[int] :
identifier[fpkm_ambig] += identifier[f]
keyword[continue]
keyword[else] :
identifier[g] = identifier[list] ( identifier[assoc] )[ literal[int] ]
keyword[try] :
identifier[idx] = identifier[misc] . identifier[bisect_index] ( identifier[genes] , identifier[g] )
keyword[except] identifier[ValueError] :
identifier[fpkm_unknown_gene_name] += identifier[f]
identifier[logger] . identifier[warning] ( literal[string] , identifier[g] )
keyword[continue]
identifier[t] = identifier[float] ( identifier[attr] [ literal[string] ])
identifier[fpkm] [ identifier[idx] ]+= identifier[f]
identifier[tpm] [ identifier[idx] ]+= identifier[t]
keyword[if] identifier[no_novel_transcripts] :
identifier[ignored_fpkm] = identifier[fpkm_novel_trans] + identifier[fpkm_unknown_gene_name]
keyword[else] :
identifier[ignored_fpkm] = identifier[fpkm_novel_gene] + identifier[fpkm_ambig] + identifier[fpkm_unknown_gene_name]
identifier[total_fpkm] = identifier[np] . identifier[sum] ( identifier[fpkm] )+ identifier[ignored_fpkm]
identifier[logger] . identifier[info] ( literal[string] , identifier[ignored_fpkm] ,
identifier[total_fpkm] , literal[int] *( identifier[ignored_fpkm] / identifier[total_fpkm] ))
keyword[if] identifier[no_novel_transcripts] keyword[and] identifier[fpkm_novel_trans] > literal[int] :
identifier[logger] . identifier[info] ( literal[string] ,
identifier[fpkm_novel_trans] , literal[int] *( identifier[fpkm_novel_trans] / identifier[total_fpkm] ))
keyword[else] :
keyword[if] identifier[fpkm_novel_gene] > literal[int] :
identifier[logger] . identifier[info] ( literal[string]
literal[string] ,
identifier[fpkm_novel_gene] , literal[int] *( identifier[fpkm_novel_gene] / identifier[total_fpkm] ))
keyword[if] identifier[fpkm_ambig] > literal[int] :
identifier[logger] . identifier[info] ( literal[string]
literal[string] ,
identifier[fpkm_ambig] , literal[int] *( identifier[fpkm_ambig] / identifier[total_fpkm] ))
keyword[if] identifier[fpkm_unknown_gene_name] > literal[int] :
identifier[logger] . identifier[info] ( literal[string]
literal[string] ,
identifier[fpkm_unknown_gene_name] ,
literal[int] *( identifier[fpkm_unknown_gene_name] / identifier[total_fpkm] ))
identifier[E] = identifier[np] . identifier[c_] [ identifier[fpkm] , identifier[tpm] ]
keyword[with] identifier[open] ( identifier[output_file] , literal[string] ) keyword[as] identifier[ofh] :
identifier[writer] = identifier[csv] . identifier[writer] ( identifier[ofh] , identifier[dialect] = literal[string] ,
identifier[lineterminator] = identifier[os] . identifier[linesep] ,
identifier[quoting] = identifier[csv] . identifier[QUOTE_NONE] )
keyword[for] identifier[i] , identifier[g] keyword[in] identifier[enumerate] ( identifier[genes] ):
identifier[writer] . identifier[writerow] ([ identifier[g] ]+[ literal[string] % identifier[e] keyword[for] identifier[e] keyword[in] identifier[E] [ identifier[i] ,:]])
keyword[return] literal[int] | def main(args=None):
"""Extracts gene-level expression data from StringTie output.
Parameters
----------
args: argparse.Namespace object, optional
The argument values. If not specified, the values will be obtained by
parsing the command line arguments using the `argparse` module.
Returns
-------
int
Exit code (0 if no error occurred).
"""
if args is None:
# parse command-line arguments
parser = get_argument_parser()
args = parser.parse_args() # depends on [control=['if'], data=['args']]
stringtie_file = args.stringtie_file
gene_file = args.gene_file
no_novel_transcripts = args.no_novel_transcripts
output_file = args.output_file
log_file = args.log_file
quiet = args.quiet
verbose = args.verbose
logger = misc.get_logger(log_file=log_file, quiet=quiet, verbose=verbose)
# read list of gene symbols
logger.info('Reading gene data...')
genes = misc.read_single(gene_file)
# read StringTie output file and summarize FPKM and TPM per gene
logger.info('Parsing StringTie output...')
logger.info('Associating StringTie gene IDs with gene symbols...')
stringtie_genes = {}
with open(stringtie_file) as fh:
reader = csv.reader(fh, dialect='excel-tab')
for l in reader:
if l[0][0] == '#':
continue # depends on [control=['if'], data=[]]
assert len(l) == 9
if l[2] != 'transcript':
continue # depends on [control=['if'], data=[]]
attr = parse_attributes(l[8])
try:
ref_gene = attr['ref_gene_name'] # depends on [control=['try'], data=[]]
except KeyError:
continue # depends on [control=['except'], data=[]]
else:
# entry has a "ref_gene_name" attribute
try:
g = stringtie_genes[attr['gene_id']] # depends on [control=['try'], data=[]]
except KeyError:
stringtie_genes[attr['gene_id']] = {ref_gene} # depends on [control=['except'], data=[]]
else:
g.add(ref_gene) # depends on [control=['for'], data=['l']] # depends on [control=['with'], data=['fh']]
logger.info('Associated %d gene IDs with gene symbols.', len(stringtie_genes))
# C = Counter(len(v) for v in stringtie_genes.itervalues())
gene_ids_ambiguous = [k for (k, v) in stringtie_genes.items() if len(v) > 1]
n = len(gene_ids_ambiguous)
logger.info('%d / %d associated with multiple gene symbols (%.1f%%).', n, len(stringtie_genes), 100 * (n / float(len(stringtie_genes))))
# read StringTie output file and summarize FPKM and TPM per gene
n = len(genes)
fpkm = np.zeros(n, dtype=np.float64)
tpm = np.zeros(n, dtype=np.float64)
fpkm_novel_gene = 0
fpkm_unknown_gene_name = 0
fpkm_novel_trans = 0
fpkm_ambig = 0
with open(stringtie_file) as fh:
reader = csv.reader(fh, dialect='excel-tab')
for l in reader:
if l[0][0] == '#':
# skip header
continue # depends on [control=['if'], data=[]]
assert len(l) == 9
if l[2] != 'transcript':
# skip exon lines
continue # depends on [control=['if'], data=[]]
attr = parse_attributes(l[8])
f = float(attr['FPKM'])
try:
g = attr['ref_gene_name'] # depends on [control=['try'], data=[]]
except KeyError:
if no_novel_transcripts:
# ignore this transcript
fpkm_novel_trans += f
continue # depends on [control=['if'], data=[]]
else:
# see if we can assign a gene name based on the gene ID
try:
assoc = stringtie_genes[attr['gene_id']] # depends on [control=['try'], data=[]]
except KeyError:
# gene_id not associated with any reference gene
fpkm_novel_gene += f
continue # depends on [control=['except'], data=[]]
else:
if len(assoc) > 1:
# gene ID associated with multiple ref. genes
# => ingored
fpkm_ambig += f
continue # depends on [control=['if'], data=[]]
else:
# gene ID associated with exactly one ref. gene
g = list(assoc)[0] # depends on [control=['except'], data=[]]
try:
idx = misc.bisect_index(genes, g) # depends on [control=['try'], data=[]]
except ValueError:
fpkm_unknown_gene_name += f
logger.warning('Unknown gene name: "%s".', g)
continue # depends on [control=['except'], data=[]]
t = float(attr['TPM'])
fpkm[idx] += f
tpm[idx] += t # depends on [control=['for'], data=['l']] # depends on [control=['with'], data=['fh']]
# ignored_fpkm = None
if no_novel_transcripts:
ignored_fpkm = fpkm_novel_trans + fpkm_unknown_gene_name # depends on [control=['if'], data=[]]
else:
ignored_fpkm = fpkm_novel_gene + fpkm_ambig + fpkm_unknown_gene_name
total_fpkm = np.sum(fpkm) + ignored_fpkm
logger.info('Ignored %.1f / %.1f FPKM (%.1f%%)', ignored_fpkm, total_fpkm, 100 * (ignored_fpkm / total_fpkm))
if no_novel_transcripts and fpkm_novel_trans > 0:
logger.info('Ignored %.1f FPKM from novel transcripts (%.1f%%).', fpkm_novel_trans, 100 * (fpkm_novel_trans / total_fpkm)) # depends on [control=['if'], data=[]]
else:
if fpkm_novel_gene > 0:
logger.info('Ignored %.1f FPKM from transcripts of novel genes (%.1f%%).', fpkm_novel_gene, 100 * (fpkm_novel_gene / total_fpkm)) # depends on [control=['if'], data=['fpkm_novel_gene']]
if fpkm_ambig > 0:
logger.info('Ignored %.1f FPKM from transcripts with ambiguous gene membership (%.1f%%).', fpkm_ambig, 100 * (fpkm_ambig / total_fpkm)) # depends on [control=['if'], data=['fpkm_ambig']]
if fpkm_unknown_gene_name > 0:
logger.info('Ignored %.1f FPKM from transcripts of genes with unknown names (%.1f%%).', fpkm_unknown_gene_name, 100 * (fpkm_unknown_gene_name / total_fpkm)) # depends on [control=['if'], data=['fpkm_unknown_gene_name']]
# write output file
E = np.c_[fpkm, tpm]
with open(output_file, 'w') as ofh:
writer = csv.writer(ofh, dialect='excel-tab', lineterminator=os.linesep, quoting=csv.QUOTE_NONE)
for (i, g) in enumerate(genes):
writer.writerow([g] + ['%.5f' % e for e in E[i, :]]) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['ofh']]
return 0 |
def get_timeplaceslice(self, placeindex) -> \
        Union[Tuple[slice, int], Tuple[int, slice]]:
    """Return a |tuple| suitable for selecting the complete time series
    of the location with the given index within
    |NetCDFVariableBase.array|.

    The ordering of the time and place entries in the returned tuple is
    delegated to |NetCDFVariableBase.sort_timeplaceentries|.

    >>> from hydpy.core.netcdftools import NetCDFVariableBase
    >>> from hydpy import make_abc_testable
    >>> NCVar = make_abc_testable(NetCDFVariableBase)
    >>> ncvar = NCVar('flux_nkor', isolate=True, timeaxis=1)
    >>> ncvar.get_timeplaceslice(2)
    (2, slice(None, None, None))
    >>> ncvar = NetCDFVariableDeep('test', isolate=False, timeaxis=0)
    >>> ncvar.get_timeplaceslice(2)
    (slice(None, None, None), 2)
    """
    complete_timeseries = slice(None)
    return self.sort_timeplaceentries(complete_timeseries, int(placeindex))
constant[Return a |tuple| for indexing a complete time series of a certain
location available in |NetCDFVariableBase.array|.
>>> from hydpy.core.netcdftools import NetCDFVariableBase
>>> from hydpy import make_abc_testable
>>> NCVar = make_abc_testable(NetCDFVariableBase)
>>> ncvar = NCVar('flux_nkor', isolate=True, timeaxis=1)
>>> ncvar.get_timeplaceslice(2)
(2, slice(None, None, None))
>>> ncvar = NetCDFVariableDeep('test', isolate=False, timeaxis=0)
>>> ncvar.get_timeplaceslice(2)
(slice(None, None, None), 2)
]
return[call[name[self].sort_timeplaceentries, parameter[call[name[slice], parameter[constant[None]]], call[name[int], parameter[name[placeindex]]]]]] | keyword[def] identifier[get_timeplaceslice] ( identifier[self] , identifier[placeindex] )-> identifier[Union] [ identifier[Tuple] [ identifier[slice] , identifier[int] ], identifier[Tuple] [ identifier[int] , identifier[slice] ]]:
literal[string]
keyword[return] identifier[self] . identifier[sort_timeplaceentries] ( identifier[slice] ( keyword[None] ), identifier[int] ( identifier[placeindex] )) | def get_timeplaceslice(self, placeindex) -> Union[Tuple[slice, int], Tuple[int, slice]]:
"""Return a |tuple| for indexing a complete time series of a certain
location available in |NetCDFVariableBase.array|.
>>> from hydpy.core.netcdftools import NetCDFVariableBase
>>> from hydpy import make_abc_testable
>>> NCVar = make_abc_testable(NetCDFVariableBase)
>>> ncvar = NCVar('flux_nkor', isolate=True, timeaxis=1)
>>> ncvar.get_timeplaceslice(2)
(2, slice(None, None, None))
>>> ncvar = NetCDFVariableDeep('test', isolate=False, timeaxis=0)
>>> ncvar.get_timeplaceslice(2)
(slice(None, None, None), 2)
"""
return self.sort_timeplaceentries(slice(None), int(placeindex)) |
def _get_left_right_blocks(x):
  """Helper function. Assumes that memory_flange is half of query sizes.

  This function splits the tensor of width 'n' into two halves, where the
  first half gets the width indices 0, 2, 4.. and the second half gets the
  width indices 3, 5, ... We also fuse two blocks along the h dimension.

  Args:
    x: a 6-d tensor.

  Returns:
    x_left_blocks, x_right_blocks: Two 6-d tensors
  """
  shape = common_layers.shape_list(x)
  num_outer_h_blocks = shape[1]
  num_outer_w_blocks = shape[2]
  flange_h = shape[3]
  flange_w = shape[4]
  depth = shape[5]
  # Drop the first and last outer h-blocks; only interior rows contribute
  # left/right memory blocks.
  interior = tf.slice(x,
                      [0, 1, 0, 0, 0, 0],
                      [-1, num_outer_h_blocks - 2, -1, -1, -1, -1])
  fused_h_blocks = (num_outer_h_blocks - 2) // 2
  # Pair up adjacent h-blocks, move the pair axis next to the flange height
  # axis, and fuse the pair into a single block of doubled height.
  interior = tf.reshape(interior,
                        [-1, fused_h_blocks, 2, num_outer_w_blocks,
                         flange_h, flange_w, depth])
  interior = tf.transpose(interior, [0, 1, 3, 2, 4, 5, 6])
  interior = tf.reshape(interior,
                        [-1, fused_h_blocks, num_outer_w_blocks,
                         2 * flange_h, flange_w, depth])
  # get it ready for splitting the left and right memory blocks
  return _split_along_width(interior)
constant[Helper function. Assumes that memory_flange is half of query sizes.
This function splits the tensor of width 'n' into two halves, where the
first half gets the width indices 0, 2, 4.. and the second half gets the
width indices 3, 5, ... We also fuse two blocks along the h dimension.
Args:
x: a 6-d tensor.
Returns:
x_left_blocks, x_right_blocks: Two 6-d tensors
]
<ast.Tuple object at 0x7da1b26af5e0> assign[=] call[name[common_layers].shape_list, parameter[name[x]]]
variable[x_left_right_blocks] assign[=] call[name[tf].slice, parameter[name[x], list[[<ast.Constant object at 0x7da1b26ac490>, <ast.Constant object at 0x7da1b26aea10>, <ast.Constant object at 0x7da1b26ac970>, <ast.Constant object at 0x7da1b26acb80>, <ast.Constant object at 0x7da1b26aeb00>, <ast.Constant object at 0x7da1b26aee90>]], list[[<ast.UnaryOp object at 0x7da1b26afa90>, <ast.BinOp object at 0x7da1b26ac220>, <ast.UnaryOp object at 0x7da1b26ac430>, <ast.UnaryOp object at 0x7da1b26adff0>, <ast.UnaryOp object at 0x7da1b26aece0>, <ast.UnaryOp object at 0x7da1b26af310>]]]]
variable[num_blocks_h] assign[=] binary_operation[binary_operation[name[x_num_outer_h_blocks] - constant[2]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
variable[x_left_right_blocks] assign[=] call[name[tf].reshape, parameter[name[x_left_right_blocks], list[[<ast.UnaryOp object at 0x7da1b26ae620>, <ast.Name object at 0x7da1b26add80>, <ast.Constant object at 0x7da1b26adbd0>, <ast.Name object at 0x7da1b26af0a0>, <ast.Name object at 0x7da1b26acd30>, <ast.Name object at 0x7da1b26ad900>, <ast.Name object at 0x7da1b26afe50>]]]]
variable[x_left_right_blocks] assign[=] call[name[tf].transpose, parameter[name[x_left_right_blocks], list[[<ast.Constant object at 0x7da1b26ac8b0>, <ast.Constant object at 0x7da1b26af580>, <ast.Constant object at 0x7da1b26aeef0>, <ast.Constant object at 0x7da1b26af8e0>, <ast.Constant object at 0x7da1b26ada80>, <ast.Constant object at 0x7da1b26af070>, <ast.Constant object at 0x7da1b26ae530>]]]]
variable[x_left_right_blocks] assign[=] call[name[tf].reshape, parameter[name[x_left_right_blocks], list[[<ast.UnaryOp object at 0x7da1b1e11b40>, <ast.Name object at 0x7da1b1e137f0>, <ast.Name object at 0x7da1b1e12b30>, <ast.BinOp object at 0x7da1b1e13130>, <ast.Name object at 0x7da1b1e124a0>, <ast.Name object at 0x7da1b1e10e20>]]]]
<ast.Tuple object at 0x7da1b1e10df0> assign[=] call[name[_split_along_width], parameter[name[x_left_right_blocks]]]
return[tuple[[<ast.Name object at 0x7da1b1e11b10>, <ast.Name object at 0x7da1b1e10550>]]] | keyword[def] identifier[_get_left_right_blocks] ( identifier[x] ):
literal[string]
( identifier[_] , identifier[x_num_outer_h_blocks] , identifier[x_num_outer_w_blocks] , identifier[x_memory_flange_h] ,
identifier[x_memory_flange_w] , identifier[depth] )= identifier[common_layers] . identifier[shape_list] ( identifier[x] )
identifier[x_left_right_blocks] = identifier[tf] . identifier[slice] ( identifier[x] ,
[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ],
[- literal[int] , identifier[x_num_outer_h_blocks] - literal[int] ,- literal[int] ,- literal[int] ,
- literal[int] ,- literal[int] ])
identifier[num_blocks_h] =( identifier[x_num_outer_h_blocks] - literal[int] )// literal[int]
identifier[x_left_right_blocks] = identifier[tf] . identifier[reshape] ( identifier[x_left_right_blocks] ,
[- literal[int] ,
identifier[num_blocks_h] ,
literal[int] , identifier[x_num_outer_w_blocks] ,
identifier[x_memory_flange_h] ,
identifier[x_memory_flange_w] , identifier[depth] ])
identifier[x_left_right_blocks] = identifier[tf] . identifier[transpose] ( identifier[x_left_right_blocks] ,
[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[x_left_right_blocks] = identifier[tf] . identifier[reshape] ( identifier[x_left_right_blocks] ,
[- literal[int] , identifier[num_blocks_h] ,
identifier[x_num_outer_w_blocks] , literal[int] * identifier[x_memory_flange_h] ,
identifier[x_memory_flange_w] , identifier[depth] ])
identifier[x_left_blocks] , identifier[x_right_blocks] = identifier[_split_along_width] ( identifier[x_left_right_blocks] )
keyword[return] identifier[x_left_blocks] , identifier[x_right_blocks] | def _get_left_right_blocks(x):
"""Helper function. Assumes that memory_flange is half of query sizes.
This function splits the tensor of width 'n' into two halves, where the
first half gets the width indices 0, 2, 4.. and the second half gets the
width indices 3, 5, ... We also fuse two blocks along the h dimension.
Args:
x: a 6-d tensor.
Returns:
x_left_blocks, x_right_blocks: Two 6-d tensors
"""
(_, x_num_outer_h_blocks, x_num_outer_w_blocks, x_memory_flange_h, x_memory_flange_w, depth) = common_layers.shape_list(x)
x_left_right_blocks = tf.slice(x, [0, 1, 0, 0, 0, 0], [-1, x_num_outer_h_blocks - 2, -1, -1, -1, -1])
num_blocks_h = (x_num_outer_h_blocks - 2) // 2
x_left_right_blocks = tf.reshape(x_left_right_blocks, [-1, num_blocks_h, 2, x_num_outer_w_blocks, x_memory_flange_h, x_memory_flange_w, depth])
x_left_right_blocks = tf.transpose(x_left_right_blocks, [0, 1, 3, 2, 4, 5, 6])
x_left_right_blocks = tf.reshape(x_left_right_blocks, [-1, num_blocks_h, x_num_outer_w_blocks, 2 * x_memory_flange_h, x_memory_flange_w, depth])
# get it ready for splitting the left and right memory blocks
(x_left_blocks, x_right_blocks) = _split_along_width(x_left_right_blocks)
return (x_left_blocks, x_right_blocks) |
def strkey(val, chaffify=1, keyspace=string.ascii_letters + string.digits):
    """ Converts an integer to a short string in @keyspace, and reverse.

        This is not intended to obfuscate numbers in any kind of
        cryptographically secure way, in fact it's the opposite. It's
        for predictable, reversable obfuscation. It can also be used to
        transform a random bit integer to a string of the same bit
        length.

        @val: #int to encode, or #str to decode
        @chaffify: #int multiplier applied before encoding (and divided
            back out on decode) to obscure the natural 0='a', 1='b', ...
            ordering
        @keyspace: #str of allowed output characters; its length is the
            encoding base

        -> #str if @val is #int, #int if @val is #str
        ..
            from vital.security import strkey

            strkey(90000000000050500502200302035023)
            # -> 'f3yMpJQUazIZHp1UO7k'

            strkey('f3yMpJQUazIZHp1UO7k')
            # -> 90000000000050500502200302035023

            strkey(2000000, chaffify=200000000000)
            # -> 'DIaqtyo2sC'
        ..

        @raise ValueError: if an integer @val is negative, or if a string
            @val contains a character outside @keyspace.
    """
    chaffify = chaffify or 1  # guard against chaffify=0 (division by zero)
    keylen = len(keyspace)
    try:
        # INT TO STRING (a str input raises TypeError on the comparison
        # below and falls through to the decode branch)
        if val < 0:
            raise ValueError("Input value must be greater than -1.")
        # chaffify the value
        val = val * chaffify
        if val == 0:
            return keyspace[0]
        # repeated divmod yields base-|keyspace| digits, least
        # significant first; reverse at the end for conventional order
        out = []
        out_add = out.append
        while val > 0:
            val, digit = divmod(val, keylen)
            out_add(keyspace[digit])
        return "".join(out)[::-1]
    except TypeError:
        # STRING TO INT
        out = 0
        val = str(val)
        for c in val:
            index = keyspace.find(c)
            if index < 0:
                # str.find returns -1 for unknown characters, which would
                # silently corrupt the decoded value -- fail loudly instead
                raise ValueError(
                    "Character %r is not in the keyspace." % c)
            out = out * keylen + index
        # dechaffify the value
        out = out // chaffify
        return int(out)
constant[ Converts integers to a sequence of strings, and reverse.
This is not intended to obfuscate numbers in any kind of
cryptographically secure way, in fact it's the opposite. It's
for predictable, reversable, obfuscation. It can also be used to
transform a random bit integer to a string of the same bit
length.
@val: #int or #str
@chaffify: #int multiple to avoid 0=a, 1=b, 2=c, ... obfuscates the
ordering
@keyspace: #str allowed output chars
-> #str if @val is #int, #int if @val is #str
..
from vital.security import strkey
strkey(0, chaffify=1)
# -> b
strkey(0, chaffify=4)
# -> e
strkey(90000000000050500502200302035023)
# -> 'f3yMpJQUazIZHp1UO7k'
strkey('f3yMpJQUazIZHp1UO7k')
# -> 90000000000050500502200302035023
strkey(2000000, chaffify=200000000000)
# -> 'DIaqtyo2sC'
..
]
variable[chaffify] assign[=] <ast.BoolOp object at 0x7da1b10a42b0>
variable[keylen] assign[=] call[name[len], parameter[name[keyspace]]]
<ast.Try object at 0x7da1b10a4880> | keyword[def] identifier[strkey] ( identifier[val] , identifier[chaffify] = literal[int] , identifier[keyspace] = identifier[string] . identifier[ascii_letters] + identifier[string] . identifier[digits] ):
literal[string]
identifier[chaffify] = identifier[chaffify] keyword[or] literal[int]
identifier[keylen] = identifier[len] ( identifier[keyspace] )
keyword[try] :
keyword[if] identifier[val] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[val] = identifier[val] * identifier[chaffify]
keyword[if] identifier[val] == literal[int] :
keyword[return] identifier[keyspace] [ literal[int] ]
identifier[out] =[]
identifier[out_add] = identifier[out] . identifier[append]
keyword[while] identifier[val] > literal[int] :
identifier[val] , identifier[digit] = identifier[divmod] ( identifier[val] , identifier[keylen] )
identifier[out_add] ( identifier[keyspace] [ identifier[digit] ])
keyword[return] literal[string] . identifier[join] ( identifier[out] )[::- literal[int] ]
keyword[except] identifier[TypeError] :
identifier[out] = literal[int]
identifier[val] = identifier[str] ( identifier[val] )
identifier[find] = identifier[str] . identifier[find]
keyword[for] identifier[c] keyword[in] identifier[val] :
identifier[out] = identifier[out] * identifier[keylen] + identifier[find] ( identifier[keyspace] , identifier[c] )
identifier[out] = identifier[out] // identifier[chaffify]
keyword[return] identifier[int] ( identifier[out] ) | def strkey(val, chaffify=1, keyspace=string.ascii_letters + string.digits):
""" Converts integers to a sequence of strings, and reverse.
This is not intended to obfuscate numbers in any kind of
cryptographically secure way, in fact it's the opposite. It's
for predictable, reversable, obfuscation. It can also be used to
transform a random bit integer to a string of the same bit
length.
@val: #int or #str
@chaffify: #int multiple to avoid 0=a, 1=b, 2=c, ... obfuscates the
ordering
@keyspace: #str allowed output chars
-> #str if @val is #int, #int if @val is #str
..
from vital.security import strkey
strkey(0, chaffify=1)
# -> b
strkey(0, chaffify=4)
# -> e
strkey(90000000000050500502200302035023)
# -> 'f3yMpJQUazIZHp1UO7k'
strkey('f3yMpJQUazIZHp1UO7k')
# -> 90000000000050500502200302035023
strkey(2000000, chaffify=200000000000)
# -> 'DIaqtyo2sC'
..
"""
chaffify = chaffify or 1
keylen = len(keyspace)
try:
# INT TO STRING
if val < 0:
raise ValueError('Input value must be greater than -1.') # depends on [control=['if'], data=[]]
# chaffify the value
val = val * chaffify
if val == 0:
return keyspace[0] # depends on [control=['if'], data=[]]
# output the new string value
out = []
out_add = out.append
while val > 0:
(val, digit) = divmod(val, keylen)
out_add(keyspace[digit]) # depends on [control=['while'], data=['val']]
return ''.join(out)[::-1] # depends on [control=['try'], data=[]]
except TypeError:
# STRING TO INT
out = 0
val = str(val)
find = str.find
for c in val:
out = out * keylen + find(keyspace, c) # depends on [control=['for'], data=['c']]
# dechaffify the value
out = out // chaffify
return int(out) # depends on [control=['except'], data=[]] |
def make_replacement_visitor(find_expression, replace_expression):
    """Return a visitor function that replaces every instance of one expression with another one."""
    def visitor_fn(expression):
        """Return the replacement if this expression matches the expression we're looking for."""
        return replace_expression if expression == find_expression else expression

    return visitor_fn
constant[Return a visitor function that replaces every instance of one expression with another one.]
def function[visitor_fn, parameter[expression]]:
constant[Return the replacement if this expression matches the expression we're looking for.]
if compare[name[expression] equal[==] name[find_expression]] begin[:]
return[name[replace_expression]]
return[name[visitor_fn]] | keyword[def] identifier[make_replacement_visitor] ( identifier[find_expression] , identifier[replace_expression] ):
literal[string]
keyword[def] identifier[visitor_fn] ( identifier[expression] ):
literal[string]
keyword[if] identifier[expression] == identifier[find_expression] :
keyword[return] identifier[replace_expression]
keyword[else] :
keyword[return] identifier[expression]
keyword[return] identifier[visitor_fn] | def make_replacement_visitor(find_expression, replace_expression):
"""Return a visitor function that replaces every instance of one expression with another one."""
def visitor_fn(expression):
"""Return the replacement if this expression matches the expression we're looking for."""
if expression == find_expression:
return replace_expression # depends on [control=['if'], data=[]]
else:
return expression
return visitor_fn |
def from_xml(cls, child, result=None):
    """
    Create new RelationMember from XML data

    :param child: XML node to be parsed
    :type child: xml.etree.ElementTree.Element
    :param result: The result this element belongs to
    :type result: overpy.Result
    :return: New relation member oject
    :rtype: overpy.RelationMember
    :raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
    """
    # The node must carry the member type this class represents.
    if child.attrib.get("type") != cls._type_value:
        raise exception.ElementDataWrongType(
            type_expected=cls._type_value,
            type_provided=child.tag.lower()
        )

    raw_ref = child.attrib.get("ref")
    member_ref = int(raw_ref) if raw_ref is not None else None
    member_role = child.attrib.get("role")

    # Collect every attribute except the ones handled explicitly above.
    skipped = ("geometry", "ref", "role", "type")
    extra_attributes = {
        name: value
        for name, value in child.attrib.items()
        if name not in skipped
    }

    # Way members may carry an inline <nd> geometry; keep None when absent.
    geometry = None
    for node in child:
        if node.tag.lower() != "nd":
            continue
        if geometry is None:
            geometry = []
        geometry.append(
            RelationWayGeometryValue(
                lat=Decimal(node.attrib["lat"]),
                lon=Decimal(node.attrib["lon"])
            )
        )

    return cls(
        attributes=extra_attributes,
        geometry=geometry,
        ref=member_ref,
        role=member_role,
        result=result
    )
) | def function[from_xml, parameter[cls, child, result]]:
constant[
Create new RelationMember from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this element belongs to
:type result: overpy.Result
:return: New relation member oject
:rtype: overpy.RelationMember
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
]
if compare[call[name[child].attrib.get, parameter[constant[type]]] not_equal[!=] name[cls]._type_value] begin[:]
<ast.Raise object at 0x7da1b043e7a0>
variable[ref] assign[=] call[name[child].attrib.get, parameter[constant[ref]]]
if compare[name[ref] is_not constant[None]] begin[:]
variable[ref] assign[=] call[name[int], parameter[name[ref]]]
variable[role] assign[=] call[name[child].attrib.get, parameter[constant[role]]]
variable[attributes] assign[=] dictionary[[], []]
variable[ignore] assign[=] list[[<ast.Constant object at 0x7da1b041b6d0>, <ast.Constant object at 0x7da1b0418430>, <ast.Constant object at 0x7da1b0419f90>, <ast.Constant object at 0x7da1b04196f0>]]
for taget[tuple[[<ast.Name object at 0x7da1b041b220>, <ast.Name object at 0x7da1b041ac80>]]] in starred[call[name[child].attrib.items, parameter[]]] begin[:]
if compare[name[n] in name[ignore]] begin[:]
continue
call[name[attributes]][name[n]] assign[=] name[v]
variable[geometry] assign[=] constant[None]
for taget[name[sub_child]] in starred[name[child]] begin[:]
if compare[call[name[sub_child].tag.lower, parameter[]] equal[==] constant[nd]] begin[:]
if compare[name[geometry] is constant[None]] begin[:]
variable[geometry] assign[=] list[[]]
call[name[geometry].append, parameter[call[name[RelationWayGeometryValue], parameter[]]]]
return[call[name[cls], parameter[]]] | keyword[def] identifier[from_xml] ( identifier[cls] , identifier[child] , identifier[result] = keyword[None] ):
literal[string]
keyword[if] identifier[child] . identifier[attrib] . identifier[get] ( literal[string] )!= identifier[cls] . identifier[_type_value] :
keyword[raise] identifier[exception] . identifier[ElementDataWrongType] (
identifier[type_expected] = identifier[cls] . identifier[_type_value] ,
identifier[type_provided] = identifier[child] . identifier[tag] . identifier[lower] ()
)
identifier[ref] = identifier[child] . identifier[attrib] . identifier[get] ( literal[string] )
keyword[if] identifier[ref] keyword[is] keyword[not] keyword[None] :
identifier[ref] = identifier[int] ( identifier[ref] )
identifier[role] = identifier[child] . identifier[attrib] . identifier[get] ( literal[string] )
identifier[attributes] ={}
identifier[ignore] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[for] identifier[n] , identifier[v] keyword[in] identifier[child] . identifier[attrib] . identifier[items] ():
keyword[if] identifier[n] keyword[in] identifier[ignore] :
keyword[continue]
identifier[attributes] [ identifier[n] ]= identifier[v]
identifier[geometry] = keyword[None]
keyword[for] identifier[sub_child] keyword[in] identifier[child] :
keyword[if] identifier[sub_child] . identifier[tag] . identifier[lower] ()== literal[string] :
keyword[if] identifier[geometry] keyword[is] keyword[None] :
identifier[geometry] =[]
identifier[geometry] . identifier[append] (
identifier[RelationWayGeometryValue] (
identifier[lat] = identifier[Decimal] ( identifier[sub_child] . identifier[attrib] [ literal[string] ]),
identifier[lon] = identifier[Decimal] ( identifier[sub_child] . identifier[attrib] [ literal[string] ])
)
)
keyword[return] identifier[cls] (
identifier[attributes] = identifier[attributes] ,
identifier[geometry] = identifier[geometry] ,
identifier[ref] = identifier[ref] ,
identifier[role] = identifier[role] ,
identifier[result] = identifier[result]
) | def from_xml(cls, child, result=None):
"""
Create new RelationMember from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this element belongs to
:type result: overpy.Result
:return: New relation member oject
:rtype: overpy.RelationMember
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
"""
if child.attrib.get('type') != cls._type_value:
raise exception.ElementDataWrongType(type_expected=cls._type_value, type_provided=child.tag.lower()) # depends on [control=['if'], data=[]]
ref = child.attrib.get('ref')
if ref is not None:
ref = int(ref) # depends on [control=['if'], data=['ref']]
role = child.attrib.get('role')
attributes = {}
ignore = ['geometry', 'ref', 'role', 'type']
for (n, v) in child.attrib.items():
if n in ignore:
continue # depends on [control=['if'], data=[]]
attributes[n] = v # depends on [control=['for'], data=[]]
geometry = None
for sub_child in child:
if sub_child.tag.lower() == 'nd':
if geometry is None:
geometry = [] # depends on [control=['if'], data=['geometry']]
geometry.append(RelationWayGeometryValue(lat=Decimal(sub_child.attrib['lat']), lon=Decimal(sub_child.attrib['lon']))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sub_child']]
return cls(attributes=attributes, geometry=geometry, ref=ref, role=role, result=result) |
def create_token(self, data, token_valid_for=180) -> str:
    """ Create encrypted JWT """
    # Token expiry is measured from now, in seconds.
    expires_at = datetime.utcnow() + timedelta(seconds=token_valid_for)
    payload = {'data': data, 'exp': expires_at}
    raw_token = jwt.encode(payload, self.app_secret)
    return Security.encrypt(raw_token)
constant[ Create encrypted JWT ]
variable[jwt_token] assign[=] call[name[jwt].encode, parameter[dictionary[[<ast.Constant object at 0x7da1b27f0dc0>, <ast.Constant object at 0x7da1b27f0d90>], [<ast.Name object at 0x7da1b27f0d60>, <ast.BinOp object at 0x7da1b27f0d30>]], name[self].app_secret]]
return[call[name[Security].encrypt, parameter[name[jwt_token]]]] | keyword[def] identifier[create_token] ( identifier[self] , identifier[data] , identifier[token_valid_for] = literal[int] )-> identifier[str] :
literal[string]
identifier[jwt_token] = identifier[jwt] . identifier[encode] ({
literal[string] : identifier[data] ,
literal[string] : identifier[datetime] . identifier[utcnow] ()+ identifier[timedelta] ( identifier[seconds] = identifier[token_valid_for] )},
identifier[self] . identifier[app_secret] )
keyword[return] identifier[Security] . identifier[encrypt] ( identifier[jwt_token] ) | def create_token(self, data, token_valid_for=180) -> str:
""" Create encrypted JWT """
jwt_token = jwt.encode({'data': data, 'exp': datetime.utcnow() + timedelta(seconds=token_valid_for)}, self.app_secret)
return Security.encrypt(jwt_token) |
def draw_texture(tex):
    """Draw a 2D texture to the current viewport

    Parameters
    ----------
    tex : instance of Texture2D
        The texture to draw.
    """
    from .program import Program
    quad = Program(vert_draw, frag_draw)
    quad['u_texture'] = tex
    # Full-viewport quad in clip coordinates, rendered as a triangle strip.
    quad['a_position'] = [[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]]
    quad['a_texcoord'] = [[0., 1.], [0., 0.], [1., 1.], [1., 0.]]
    quad.draw('triangle_strip')
constant[Draw a 2D texture to the current viewport
Parameters
----------
tex : instance of Texture2D
The texture to draw.
]
from relative_module[program] import module[Program]
variable[program] assign[=] call[name[Program], parameter[name[vert_draw], name[frag_draw]]]
call[name[program]][constant[u_texture]] assign[=] name[tex]
call[name[program]][constant[a_position]] assign[=] list[[<ast.List object at 0x7da18dc99120>, <ast.List object at 0x7da18dc98910>, <ast.List object at 0x7da18dc992d0>, <ast.List object at 0x7da18dc9abc0>]]
call[name[program]][constant[a_texcoord]] assign[=] list[[<ast.List object at 0x7da18dc98700>, <ast.List object at 0x7da18dc9bbe0>, <ast.List object at 0x7da1b0f9ca90>, <ast.List object at 0x7da1b0f9f4c0>]]
call[name[program].draw, parameter[constant[triangle_strip]]] | keyword[def] identifier[draw_texture] ( identifier[tex] ):
literal[string]
keyword[from] . identifier[program] keyword[import] identifier[Program]
identifier[program] = identifier[Program] ( identifier[vert_draw] , identifier[frag_draw] )
identifier[program] [ literal[string] ]= identifier[tex]
identifier[program] [ literal[string] ]=[[- literal[int] ,- literal[int] ],[- literal[int] , literal[int] ],[ literal[int] ,- literal[int] ],[ literal[int] , literal[int] ]]
identifier[program] [ literal[string] ]=[[ literal[int] , literal[int] ],[ literal[int] , literal[int] ],[ literal[int] , literal[int] ],[ literal[int] , literal[int] ]]
identifier[program] . identifier[draw] ( literal[string] ) | def draw_texture(tex):
"""Draw a 2D texture to the current viewport
Parameters
----------
tex : instance of Texture2D
The texture to draw.
"""
from .program import Program
program = Program(vert_draw, frag_draw)
program['u_texture'] = tex
program['a_position'] = [[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]]
program['a_texcoord'] = [[0.0, 1.0], [0.0, 0.0], [1.0, 1.0], [1.0, 0.0]]
program.draw('triangle_strip') |
def write(self, filename=""):
    """
    Writes data from L{PE} object to a file.

    When @filename is empty, nothing is written and the stream data is
    returned to the caller instead.

    @type filename: str
    @param filename: (Optional) Path of the file to write the data to.

    @rtype: str
    @return: The L{PE} stream data (only when @filename is empty).

    @raise IOError: If the file could not be opened for write operations.
    """
    file_data = str(self)
    if filename:
        try:
            self.__write(filename, file_data)
        except IOError as exc:
            # Re-raise with a clearer message while chaining the original
            # error so the underlying OS failure is not lost.
            raise IOError(
                "File could not be opened for write operations.") from exc
    else:
        return file_data
constant[
Writes data from L{PE} object to a file.
@rtype: str
@return: The L{PE} stream data.
@raise IOError: If the file could not be opened for write operations.
]
variable[file_data] assign[=] call[name[str], parameter[name[self]]]
if name[filename] begin[:]
<ast.Try object at 0x7da18c4cd030> | keyword[def] identifier[write] ( identifier[self] , identifier[filename] = literal[string] ):
literal[string]
identifier[file_data] = identifier[str] ( identifier[self] )
keyword[if] identifier[filename] :
keyword[try] :
identifier[self] . identifier[__write] ( identifier[filename] , identifier[file_data] )
keyword[except] identifier[IOError] :
keyword[raise] identifier[IOError] ( literal[string] )
keyword[else] :
keyword[return] identifier[file_data] | def write(self, filename=''):
"""
Writes data from L{PE} object to a file.
@rtype: str
@return: The L{PE} stream data.
@raise IOError: If the file could not be opened for write operations.
"""
file_data = str(self)
if filename:
try:
self.__write(filename, file_data) # depends on [control=['try'], data=[]]
except IOError:
raise IOError('File could not be opened for write operations.') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
return file_data |
def _return_assoc_tuple(self, objects):
"""
Create the property tuple for _imethod return of references,
referencenames, associators, and associatornames methods.
This is different than the get/enum imethod return tuples. It creates an
OBJECTPATH for each object in the return list.
_imethod call returns None when there are zero objects rather
than a tuple with empty object path
"""
if objects:
result = [(u'OBJECTPATH', {}, obj) for obj in objects]
return self._make_tuple(result)
return None | def function[_return_assoc_tuple, parameter[self, objects]]:
constant[
Create the property tuple for _imethod return of references,
referencenames, associators, and associatornames methods.
This is different than the get/enum imethod return tuples. It creates an
OBJECTPATH for each object in the return list.
_imethod call returns None when there are zero objects rather
than a tuple with empty object path
]
if name[objects] begin[:]
variable[result] assign[=] <ast.ListComp object at 0x7da18dc07fd0>
return[call[name[self]._make_tuple, parameter[name[result]]]]
return[constant[None]] | keyword[def] identifier[_return_assoc_tuple] ( identifier[self] , identifier[objects] ):
literal[string]
keyword[if] identifier[objects] :
identifier[result] =[( literal[string] ,{}, identifier[obj] ) keyword[for] identifier[obj] keyword[in] identifier[objects] ]
keyword[return] identifier[self] . identifier[_make_tuple] ( identifier[result] )
keyword[return] keyword[None] | def _return_assoc_tuple(self, objects):
"""
Create the property tuple for _imethod return of references,
referencenames, associators, and associatornames methods.
This is different than the get/enum imethod return tuples. It creates an
OBJECTPATH for each object in the return list.
_imethod call returns None when there are zero objects rather
than a tuple with empty object path
"""
if objects:
result = [(u'OBJECTPATH', {}, obj) for obj in objects]
return self._make_tuple(result) # depends on [control=['if'], data=[]]
return None |
def gen_timeout_request_renew(lease):
    """Generate time in seconds to retransmit DHCPREQUEST.

    [:rfc:`2131#section-4..4.5`]::

        In both RENEWING and REBINDING states,
        if the client receives no response to its DHCPREQUEST
        message, the client SHOULD wait one-half of the remaining
        time until T2 (in RENEWING state) and one-half of the
        remaining lease time (in REBINDING state), down to a
        minimum of 60 seconds, before retransmitting the
        DHCPREQUEST message.
    """
    remaining = (lease.rebinding_time - lease.renewing_time) * RENEW_PERC
    # Never retransmit more often than once per minute (RFC minimum).
    remaining = max(remaining, 60)
    logger.debug('Next request in renew will happen on %s',
                 future_dt_str(nowutc(), remaining))
    return remaining
constant[Generate time in seconds to retransmit DHCPREQUEST.
[:rfc:`2131#section-4..4.5`]::
In both RENEWING and REBINDING states,
if the client receives no response to its DHCPREQUEST
message, the client SHOULD wait one-half of the remaining
time until T2 (in RENEWING state) and one-half of the
remaining lease time (in REBINDING state), down to a
minimum of 60 seconds, before retransmitting the
DHCPREQUEST message.
]
variable[time_left] assign[=] binary_operation[binary_operation[name[lease].rebinding_time - name[lease].renewing_time] * name[RENEW_PERC]]
if compare[name[time_left] less[<] constant[60]] begin[:]
variable[time_left] assign[=] constant[60]
call[name[logger].debug, parameter[constant[Next request in renew will happen on %s], call[name[future_dt_str], parameter[call[name[nowutc], parameter[]], name[time_left]]]]]
return[name[time_left]] | keyword[def] identifier[gen_timeout_request_renew] ( identifier[lease] ):
literal[string]
identifier[time_left] =( identifier[lease] . identifier[rebinding_time] - identifier[lease] . identifier[renewing_time] )* identifier[RENEW_PERC]
keyword[if] identifier[time_left] < literal[int] :
identifier[time_left] = literal[int]
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[future_dt_str] ( identifier[nowutc] (), identifier[time_left] ))
keyword[return] identifier[time_left] | def gen_timeout_request_renew(lease):
"""Generate time in seconds to retransmit DHCPREQUEST.
[:rfc:`2131#section-4..4.5`]::
In both RENEWING and REBINDING states,
if the client receives no response to its DHCPREQUEST
message, the client SHOULD wait one-half of the remaining
time until T2 (in RENEWING state) and one-half of the
remaining lease time (in REBINDING state), down to a
minimum of 60 seconds, before retransmitting the
DHCPREQUEST message.
"""
time_left = (lease.rebinding_time - lease.renewing_time) * RENEW_PERC
if time_left < 60:
time_left = 60 # depends on [control=['if'], data=['time_left']]
logger.debug('Next request in renew will happen on %s', future_dt_str(nowutc(), time_left))
return time_left |
def default(thumbnailer, prepared_options, source_filename,
            thumbnail_extension, **kwargs):
    """
    Easy-thumbnails' default name processor.

    For example: ``source.jpg.100x100_q80_crop_upscale.jpg``
    """
    parts = [source_filename]
    opts_in_path = ('%(opts)s' in thumbnailer.thumbnail_basedir
                    or '%(opts)s' in thumbnailer.thumbnail_subdir)
    if opts_in_path:
        # Options already appear in the directory path; only append the
        # extension, and only when it differs from the source's.
        source_ext = os.path.splitext(source_filename)[1][1:]
        if thumbnail_extension != source_ext:
            parts.append(thumbnail_extension)
    else:
        parts.extend(['_'.join(prepared_options), thumbnail_extension])
    return '.'.join(parts)
constant[
Easy-thumbnails' default name processor.
For example: ``source.jpg.100x100_q80_crop_upscale.jpg``
]
variable[filename_parts] assign[=] list[[<ast.Name object at 0x7da18f00fb20>]]
if <ast.BoolOp object at 0x7da18f00fbb0> begin[:]
if compare[name[thumbnail_extension] not_equal[!=] call[call[call[name[os].path.splitext, parameter[name[source_filename]]]][constant[1]]][<ast.Slice object at 0x7da20c9925f0>]] begin[:]
call[name[filename_parts].append, parameter[name[thumbnail_extension]]]
return[call[constant[.].join, parameter[name[filename_parts]]]] | keyword[def] identifier[default] ( identifier[thumbnailer] , identifier[prepared_options] , identifier[source_filename] ,
identifier[thumbnail_extension] ,** identifier[kwargs] ):
literal[string]
identifier[filename_parts] =[ identifier[source_filename] ]
keyword[if] ( literal[string] keyword[in] identifier[thumbnailer] . identifier[thumbnail_basedir] keyword[or]
literal[string] keyword[in] identifier[thumbnailer] . identifier[thumbnail_subdir] ):
keyword[if] identifier[thumbnail_extension] != identifier[os] . identifier[path] . identifier[splitext] ( identifier[source_filename] )[ literal[int] ][ literal[int] :]:
identifier[filename_parts] . identifier[append] ( identifier[thumbnail_extension] )
keyword[else] :
identifier[filename_parts] +=[ literal[string] . identifier[join] ( identifier[prepared_options] ), identifier[thumbnail_extension] ]
keyword[return] literal[string] . identifier[join] ( identifier[filename_parts] ) | def default(thumbnailer, prepared_options, source_filename, thumbnail_extension, **kwargs):
"""
Easy-thumbnails' default name processor.
For example: ``source.jpg.100x100_q80_crop_upscale.jpg``
"""
filename_parts = [source_filename]
if '%(opts)s' in thumbnailer.thumbnail_basedir or '%(opts)s' in thumbnailer.thumbnail_subdir:
if thumbnail_extension != os.path.splitext(source_filename)[1][1:]:
filename_parts.append(thumbnail_extension) # depends on [control=['if'], data=['thumbnail_extension']] # depends on [control=['if'], data=[]]
else:
filename_parts += ['_'.join(prepared_options), thumbnail_extension]
return '.'.join(filename_parts) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.