code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def default_storage_policy_assigned(name, policy, datastore):
'''
Assigns a default storage policy to a datastore
policy
Name of storage policy
datastore
Name of datastore
'''
log.info('Running state %s for policy \'%s\', datastore \'%s\'.',
name, policy, datastore)
changes = {}
changes_required = False
ret = {'name': name,
'changes': {},
'result': None,
'comment': None}
si = None
try:
si = __salt__['vsphere.get_service_instance_via_proxy']()
existing_policy = \
__salt__['vsphere.list_default_storage_policy_of_datastore'](
datastore=datastore, service_instance=si)
if existing_policy['name'] == policy:
comment = ('Storage policy \'{0}\' is already assigned to '
'datastore \'{1}\'. Nothing to be done.'
''.format(policy, datastore))
else:
changes_required = True
changes = {
'default_storage_policy': {'old': existing_policy['name'],
'new': policy}}
if __opts__['test']:
comment = ('State {0} will assign storage policy \'{1}\' to '
'datastore \'{2}\'.').format(name, policy,
datastore)
else:
__salt__['vsphere.assign_default_storage_policy_to_datastore'](
policy=policy, datastore=datastore, service_instance=si)
comment = ('Storage policy \'{0} was assigned to datastore '
'\'{1}\'.').format(policy, name)
log.info(comment)
except CommandExecutionError as exc:
log.error('Error: %s', exc)
if si:
__salt__['vsphere.disconnect'](si)
ret.update({'comment': exc.strerror,
'result': False if not __opts__['test'] else None})
return ret
ret['comment'] = comment
if changes_required:
ret.update({
'changes': changes,
'result': None if __opts__['test'] else True,
})
else:
ret['result'] = True
return ret | def function[default_storage_policy_assigned, parameter[name, policy, datastore]]:
constant[
Assigns a default storage policy to a datastore
policy
Name of storage policy
datastore
Name of datastore
]
call[name[log].info, parameter[constant[Running state %s for policy '%s', datastore '%s'.], name[name], name[policy], name[datastore]]]
variable[changes] assign[=] dictionary[[], []]
variable[changes_required] assign[=] constant[False]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da18dc05030>, <ast.Constant object at 0x7da18dc04460>, <ast.Constant object at 0x7da18dc05570>, <ast.Constant object at 0x7da18dc070a0>], [<ast.Name object at 0x7da18dc07460>, <ast.Dict object at 0x7da18dc041f0>, <ast.Constant object at 0x7da18dc05c30>, <ast.Constant object at 0x7da18dc04fa0>]]
variable[si] assign[=] constant[None]
<ast.Try object at 0x7da18dc06b60>
call[name[ret]][constant[comment]] assign[=] name[comment]
if name[changes_required] begin[:]
call[name[ret].update, parameter[dictionary[[<ast.Constant object at 0x7da18dc042e0>, <ast.Constant object at 0x7da18dc05210>], [<ast.Name object at 0x7da18dc05ff0>, <ast.IfExp object at 0x7da18dc05240>]]]]
return[name[ret]] | keyword[def] identifier[default_storage_policy_assigned] ( identifier[name] , identifier[policy] , identifier[datastore] ):
literal[string]
identifier[log] . identifier[info] ( literal[string] ,
identifier[name] , identifier[policy] , identifier[datastore] )
identifier[changes] ={}
identifier[changes_required] = keyword[False]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[None] ,
literal[string] : keyword[None] }
identifier[si] = keyword[None]
keyword[try] :
identifier[si] = identifier[__salt__] [ literal[string] ]()
identifier[existing_policy] = identifier[__salt__] [ literal[string] ](
identifier[datastore] = identifier[datastore] , identifier[service_instance] = identifier[si] )
keyword[if] identifier[existing_policy] [ literal[string] ]== identifier[policy] :
identifier[comment] =( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[policy] , identifier[datastore] ))
keyword[else] :
identifier[changes_required] = keyword[True]
identifier[changes] ={
literal[string] :{ literal[string] : identifier[existing_policy] [ literal[string] ],
literal[string] : identifier[policy] }}
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[comment] =( literal[string]
literal[string] ). identifier[format] ( identifier[name] , identifier[policy] ,
identifier[datastore] )
keyword[else] :
identifier[__salt__] [ literal[string] ](
identifier[policy] = identifier[policy] , identifier[datastore] = identifier[datastore] , identifier[service_instance] = identifier[si] )
identifier[comment] =( literal[string]
literal[string] ). identifier[format] ( identifier[policy] , identifier[name] )
identifier[log] . identifier[info] ( identifier[comment] )
keyword[except] identifier[CommandExecutionError] keyword[as] identifier[exc] :
identifier[log] . identifier[error] ( literal[string] , identifier[exc] )
keyword[if] identifier[si] :
identifier[__salt__] [ literal[string] ]( identifier[si] )
identifier[ret] . identifier[update] ({ literal[string] : identifier[exc] . identifier[strerror] ,
literal[string] : keyword[False] keyword[if] keyword[not] identifier[__opts__] [ literal[string] ] keyword[else] keyword[None] })
keyword[return] identifier[ret]
identifier[ret] [ literal[string] ]= identifier[comment]
keyword[if] identifier[changes_required] :
identifier[ret] . identifier[update] ({
literal[string] : identifier[changes] ,
literal[string] : keyword[None] keyword[if] identifier[__opts__] [ literal[string] ] keyword[else] keyword[True] ,
})
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[True]
keyword[return] identifier[ret] | def default_storage_policy_assigned(name, policy, datastore):
"""
Assigns a default storage policy to a datastore
policy
Name of storage policy
datastore
Name of datastore
"""
log.info("Running state %s for policy '%s', datastore '%s'.", name, policy, datastore)
changes = {}
changes_required = False
ret = {'name': name, 'changes': {}, 'result': None, 'comment': None}
si = None
try:
si = __salt__['vsphere.get_service_instance_via_proxy']()
existing_policy = __salt__['vsphere.list_default_storage_policy_of_datastore'](datastore=datastore, service_instance=si)
if existing_policy['name'] == policy:
comment = "Storage policy '{0}' is already assigned to datastore '{1}'. Nothing to be done.".format(policy, datastore) # depends on [control=['if'], data=['policy']]
else:
changes_required = True
changes = {'default_storage_policy': {'old': existing_policy['name'], 'new': policy}}
if __opts__['test']:
comment = "State {0} will assign storage policy '{1}' to datastore '{2}'.".format(name, policy, datastore) # depends on [control=['if'], data=[]]
else:
__salt__['vsphere.assign_default_storage_policy_to_datastore'](policy=policy, datastore=datastore, service_instance=si)
comment = "Storage policy '{0} was assigned to datastore '{1}'.".format(policy, name)
log.info(comment) # depends on [control=['try'], data=[]]
except CommandExecutionError as exc:
log.error('Error: %s', exc)
if si:
__salt__['vsphere.disconnect'](si) # depends on [control=['if'], data=[]]
ret.update({'comment': exc.strerror, 'result': False if not __opts__['test'] else None})
return ret # depends on [control=['except'], data=['exc']]
ret['comment'] = comment
if changes_required:
ret.update({'changes': changes, 'result': None if __opts__['test'] else True}) # depends on [control=['if'], data=[]]
else:
ret['result'] = True
return ret |
async def read(self):
""" Stop reading when gets None """
payload = await self._queue.get()
self._queue.task_done()
return payload | <ast.AsyncFunctionDef object at 0x7da1b1f38460> | keyword[async] keyword[def] identifier[read] ( identifier[self] ):
literal[string]
identifier[payload] = keyword[await] identifier[self] . identifier[_queue] . identifier[get] ()
identifier[self] . identifier[_queue] . identifier[task_done] ()
keyword[return] identifier[payload] | async def read(self):
""" Stop reading when gets None """
payload = await self._queue.get()
self._queue.task_done()
return payload |
def get_structure_from_name(self, structure_name):
"""
Return a structure from a name
Args:
structure_name (str): name of the structure
Returns:
Structure
"""
return next((st for st in self.structures if st.name == structure_name), None) | def function[get_structure_from_name, parameter[self, structure_name]]:
constant[
Return a structure from a name
Args:
structure_name (str): name of the structure
Returns:
Structure
]
return[call[name[next], parameter[<ast.GeneratorExp object at 0x7da20c6e4b80>, constant[None]]]] | keyword[def] identifier[get_structure_from_name] ( identifier[self] , identifier[structure_name] ):
literal[string]
keyword[return] identifier[next] (( identifier[st] keyword[for] identifier[st] keyword[in] identifier[self] . identifier[structures] keyword[if] identifier[st] . identifier[name] == identifier[structure_name] ), keyword[None] ) | def get_structure_from_name(self, structure_name):
"""
Return a structure from a name
Args:
structure_name (str): name of the structure
Returns:
Structure
"""
return next((st for st in self.structures if st.name == structure_name), None) |
def string(s, salt=None):
"""
获取一个字符串的 MD5 值
:param:
* s: (string) 需要进行 hash 的字符串
* salt: (string) 随机字符串,默认为 None
:return:
* result: (string) 32 位小写 MD5 值
"""
m = hashlib.md5()
s = s.encode('utf-8') + salt.encode('utf-8') if salt is not None else s.encode('utf-8')
m.update(s)
result = m.hexdigest()
return result | def function[string, parameter[s, salt]]:
constant[
获取一个字符串的 MD5 值
:param:
* s: (string) 需要进行 hash 的字符串
* salt: (string) 随机字符串,默认为 None
:return:
* result: (string) 32 位小写 MD5 值
]
variable[m] assign[=] call[name[hashlib].md5, parameter[]]
variable[s] assign[=] <ast.IfExp object at 0x7da1b08136d0>
call[name[m].update, parameter[name[s]]]
variable[result] assign[=] call[name[m].hexdigest, parameter[]]
return[name[result]] | keyword[def] identifier[string] ( identifier[s] , identifier[salt] = keyword[None] ):
literal[string]
identifier[m] = identifier[hashlib] . identifier[md5] ()
identifier[s] = identifier[s] . identifier[encode] ( literal[string] )+ identifier[salt] . identifier[encode] ( literal[string] ) keyword[if] identifier[salt] keyword[is] keyword[not] keyword[None] keyword[else] identifier[s] . identifier[encode] ( literal[string] )
identifier[m] . identifier[update] ( identifier[s] )
identifier[result] = identifier[m] . identifier[hexdigest] ()
keyword[return] identifier[result] | def string(s, salt=None):
"""
获取一个字符串的 MD5 值
:param:
* s: (string) 需要进行 hash 的字符串
* salt: (string) 随机字符串,默认为 None
:return:
* result: (string) 32 位小写 MD5 值
"""
m = hashlib.md5()
s = s.encode('utf-8') + salt.encode('utf-8') if salt is not None else s.encode('utf-8')
m.update(s)
result = m.hexdigest()
return result |
def update(self):
"""Update |C1| based on :math:`c_1 = \\frac{Damp}{1+Damp}`.
Examples:
The first examples show the calculated value of |C1| for
the lowest possible value of |Lag|, the lowest possible value,
and an intermediate value:
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> damp(0.0)
>>> derived.c1.update()
>>> derived.c1
c1(0.0)
>>> damp(1.0)
>>> derived.c1.update()
>>> derived.c1
c1(0.5)
>>> damp(0.25)
>>> derived.c1.update()
>>> derived.c1
c1(0.2)
For to low and to high values of |Lag|, clipping is performed:
>>> damp.value = -0.1
>>> derived.c1.update()
>>> derived.c1
c1(0.0)
>>> damp.value = 1.1
>>> derived.c1.update()
>>> derived.c1
c1(0.5)
"""
damp = self.subpars.pars.control.damp
self(numpy.clip(damp/(1.+damp), 0., .5)) | def function[update, parameter[self]]:
constant[Update |C1| based on :math:`c_1 = \frac{Damp}{1+Damp}`.
Examples:
The first examples show the calculated value of |C1| for
the lowest possible value of |Lag|, the lowest possible value,
and an intermediate value:
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> damp(0.0)
>>> derived.c1.update()
>>> derived.c1
c1(0.0)
>>> damp(1.0)
>>> derived.c1.update()
>>> derived.c1
c1(0.5)
>>> damp(0.25)
>>> derived.c1.update()
>>> derived.c1
c1(0.2)
For to low and to high values of |Lag|, clipping is performed:
>>> damp.value = -0.1
>>> derived.c1.update()
>>> derived.c1
c1(0.0)
>>> damp.value = 1.1
>>> derived.c1.update()
>>> derived.c1
c1(0.5)
]
variable[damp] assign[=] name[self].subpars.pars.control.damp
call[name[self], parameter[call[name[numpy].clip, parameter[binary_operation[name[damp] / binary_operation[constant[1.0] + name[damp]]], constant[0.0], constant[0.5]]]]] | keyword[def] identifier[update] ( identifier[self] ):
literal[string]
identifier[damp] = identifier[self] . identifier[subpars] . identifier[pars] . identifier[control] . identifier[damp]
identifier[self] ( identifier[numpy] . identifier[clip] ( identifier[damp] /( literal[int] + identifier[damp] ), literal[int] , literal[int] )) | def update(self):
"""Update |C1| based on :math:`c_1 = \\frac{Damp}{1+Damp}`.
Examples:
The first examples show the calculated value of |C1| for
the lowest possible value of |Lag|, the lowest possible value,
and an intermediate value:
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> damp(0.0)
>>> derived.c1.update()
>>> derived.c1
c1(0.0)
>>> damp(1.0)
>>> derived.c1.update()
>>> derived.c1
c1(0.5)
>>> damp(0.25)
>>> derived.c1.update()
>>> derived.c1
c1(0.2)
For to low and to high values of |Lag|, clipping is performed:
>>> damp.value = -0.1
>>> derived.c1.update()
>>> derived.c1
c1(0.0)
>>> damp.value = 1.1
>>> derived.c1.update()
>>> derived.c1
c1(0.5)
"""
damp = self.subpars.pars.control.damp
self(numpy.clip(damp / (1.0 + damp), 0.0, 0.5)) |
def unproject(self, xy):
"""
Returns the coordinates from position in meters
"""
(x, y) = xy
lng = x/EARTH_RADIUS * RAD_TO_DEG
lat = 2 * atan(exp(y/EARTH_RADIUS)) - pi/2 * RAD_TO_DEG
return (lng, lat) | def function[unproject, parameter[self, xy]]:
constant[
Returns the coordinates from position in meters
]
<ast.Tuple object at 0x7da1b23e7430> assign[=] name[xy]
variable[lng] assign[=] binary_operation[binary_operation[name[x] / name[EARTH_RADIUS]] * name[RAD_TO_DEG]]
variable[lat] assign[=] binary_operation[binary_operation[constant[2] * call[name[atan], parameter[call[name[exp], parameter[binary_operation[name[y] / name[EARTH_RADIUS]]]]]]] - binary_operation[binary_operation[name[pi] / constant[2]] * name[RAD_TO_DEG]]]
return[tuple[[<ast.Name object at 0x7da1b25062c0>, <ast.Name object at 0x7da1b2507190>]]] | keyword[def] identifier[unproject] ( identifier[self] , identifier[xy] ):
literal[string]
( identifier[x] , identifier[y] )= identifier[xy]
identifier[lng] = identifier[x] / identifier[EARTH_RADIUS] * identifier[RAD_TO_DEG]
identifier[lat] = literal[int] * identifier[atan] ( identifier[exp] ( identifier[y] / identifier[EARTH_RADIUS] ))- identifier[pi] / literal[int] * identifier[RAD_TO_DEG]
keyword[return] ( identifier[lng] , identifier[lat] ) | def unproject(self, xy):
"""
Returns the coordinates from position in meters
"""
(x, y) = xy
lng = x / EARTH_RADIUS * RAD_TO_DEG
lat = 2 * atan(exp(y / EARTH_RADIUS)) - pi / 2 * RAD_TO_DEG
return (lng, lat) |
def get_methods_names(public_properties):
"""
Generates the names of the fields where to inject the getter and setter
methods
:param public_properties: If True, returns the names of public property
accessors, else of hidden property ones
:return: getter and a setter field names
"""
if public_properties:
prefix = ipopo_constants.IPOPO_PROPERTY_PREFIX
else:
prefix = ipopo_constants.IPOPO_HIDDEN_PROPERTY_PREFIX
return (
"{0}{1}".format(prefix, ipopo_constants.IPOPO_GETTER_SUFFIX),
"{0}{1}".format(prefix, ipopo_constants.IPOPO_SETTER_SUFFIX),
) | def function[get_methods_names, parameter[public_properties]]:
constant[
Generates the names of the fields where to inject the getter and setter
methods
:param public_properties: If True, returns the names of public property
accessors, else of hidden property ones
:return: getter and a setter field names
]
if name[public_properties] begin[:]
variable[prefix] assign[=] name[ipopo_constants].IPOPO_PROPERTY_PREFIX
return[tuple[[<ast.Call object at 0x7da1b0349810>, <ast.Call object at 0x7da1b03ae9b0>]]] | keyword[def] identifier[get_methods_names] ( identifier[public_properties] ):
literal[string]
keyword[if] identifier[public_properties] :
identifier[prefix] = identifier[ipopo_constants] . identifier[IPOPO_PROPERTY_PREFIX]
keyword[else] :
identifier[prefix] = identifier[ipopo_constants] . identifier[IPOPO_HIDDEN_PROPERTY_PREFIX]
keyword[return] (
literal[string] . identifier[format] ( identifier[prefix] , identifier[ipopo_constants] . identifier[IPOPO_GETTER_SUFFIX] ),
literal[string] . identifier[format] ( identifier[prefix] , identifier[ipopo_constants] . identifier[IPOPO_SETTER_SUFFIX] ),
) | def get_methods_names(public_properties):
"""
Generates the names of the fields where to inject the getter and setter
methods
:param public_properties: If True, returns the names of public property
accessors, else of hidden property ones
:return: getter and a setter field names
"""
if public_properties:
prefix = ipopo_constants.IPOPO_PROPERTY_PREFIX # depends on [control=['if'], data=[]]
else:
prefix = ipopo_constants.IPOPO_HIDDEN_PROPERTY_PREFIX
return ('{0}{1}'.format(prefix, ipopo_constants.IPOPO_GETTER_SUFFIX), '{0}{1}'.format(prefix, ipopo_constants.IPOPO_SETTER_SUFFIX)) |
def _appendComponent(self, baseGlyph, transformation=None, identifier=None, **kwargs):
"""
baseGlyph will be a valid glyph name.
The baseGlyph may or may not be in the layer.
offset will be a valid offset (x, y).
scale will be a valid scale (x, y).
identifier will be a valid, nonconflicting identifier.
This must return the new component.
Subclasses may override this method.
"""
pointPen = self.getPointPen()
pointPen.addComponent(baseGlyph, transformation=transformation, identifier=identifier)
return self.components[-1] | def function[_appendComponent, parameter[self, baseGlyph, transformation, identifier]]:
constant[
baseGlyph will be a valid glyph name.
The baseGlyph may or may not be in the layer.
offset will be a valid offset (x, y).
scale will be a valid scale (x, y).
identifier will be a valid, nonconflicting identifier.
This must return the new component.
Subclasses may override this method.
]
variable[pointPen] assign[=] call[name[self].getPointPen, parameter[]]
call[name[pointPen].addComponent, parameter[name[baseGlyph]]]
return[call[name[self].components][<ast.UnaryOp object at 0x7da204961a50>]] | keyword[def] identifier[_appendComponent] ( identifier[self] , identifier[baseGlyph] , identifier[transformation] = keyword[None] , identifier[identifier] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[pointPen] = identifier[self] . identifier[getPointPen] ()
identifier[pointPen] . identifier[addComponent] ( identifier[baseGlyph] , identifier[transformation] = identifier[transformation] , identifier[identifier] = identifier[identifier] )
keyword[return] identifier[self] . identifier[components] [- literal[int] ] | def _appendComponent(self, baseGlyph, transformation=None, identifier=None, **kwargs):
"""
baseGlyph will be a valid glyph name.
The baseGlyph may or may not be in the layer.
offset will be a valid offset (x, y).
scale will be a valid scale (x, y).
identifier will be a valid, nonconflicting identifier.
This must return the new component.
Subclasses may override this method.
"""
pointPen = self.getPointPen()
pointPen.addComponent(baseGlyph, transformation=transformation, identifier=identifier)
return self.components[-1] |
def set_root_prefix(self, prefix=None):
"""
Set the prefix to the root environment (default is /opt/anaconda).
This function should only be called once (right after importing
conda_api).
"""
if prefix:
self.ROOT_PREFIX = prefix
else:
# Find some conda instance, and then use info to get 'root_prefix'
worker = self._call_and_parse(['info', '--json'], abspath=False)
info = worker.communicate()[0]
self.ROOT_PREFIX = info['root_prefix'] | def function[set_root_prefix, parameter[self, prefix]]:
constant[
Set the prefix to the root environment (default is /opt/anaconda).
This function should only be called once (right after importing
conda_api).
]
if name[prefix] begin[:]
name[self].ROOT_PREFIX assign[=] name[prefix] | keyword[def] identifier[set_root_prefix] ( identifier[self] , identifier[prefix] = keyword[None] ):
literal[string]
keyword[if] identifier[prefix] :
identifier[self] . identifier[ROOT_PREFIX] = identifier[prefix]
keyword[else] :
identifier[worker] = identifier[self] . identifier[_call_and_parse] ([ literal[string] , literal[string] ], identifier[abspath] = keyword[False] )
identifier[info] = identifier[worker] . identifier[communicate] ()[ literal[int] ]
identifier[self] . identifier[ROOT_PREFIX] = identifier[info] [ literal[string] ] | def set_root_prefix(self, prefix=None):
"""
Set the prefix to the root environment (default is /opt/anaconda).
This function should only be called once (right after importing
conda_api).
"""
if prefix:
self.ROOT_PREFIX = prefix # depends on [control=['if'], data=[]]
else:
# Find some conda instance, and then use info to get 'root_prefix'
worker = self._call_and_parse(['info', '--json'], abspath=False)
info = worker.communicate()[0]
self.ROOT_PREFIX = info['root_prefix'] |
def get(cls, object_id):
"""
Retrieves a single model
:param object_id: the primary id of the model
:type object_id: integer
:return: the object of the parsed xml object
:rtype: object
"""
return fields.ObjectField(name=cls.ENDPOINT, init_class=cls).decode(
cls.element_from_string(
cls._get_request(endpoint=cls.ENDPOINT + '/' + str(object_id)).text
)
) | def function[get, parameter[cls, object_id]]:
constant[
Retrieves a single model
:param object_id: the primary id of the model
:type object_id: integer
:return: the object of the parsed xml object
:rtype: object
]
return[call[call[name[fields].ObjectField, parameter[]].decode, parameter[call[name[cls].element_from_string, parameter[call[name[cls]._get_request, parameter[]].text]]]]] | keyword[def] identifier[get] ( identifier[cls] , identifier[object_id] ):
literal[string]
keyword[return] identifier[fields] . identifier[ObjectField] ( identifier[name] = identifier[cls] . identifier[ENDPOINT] , identifier[init_class] = identifier[cls] ). identifier[decode] (
identifier[cls] . identifier[element_from_string] (
identifier[cls] . identifier[_get_request] ( identifier[endpoint] = identifier[cls] . identifier[ENDPOINT] + literal[string] + identifier[str] ( identifier[object_id] )). identifier[text]
)
) | def get(cls, object_id):
"""
Retrieves a single model
:param object_id: the primary id of the model
:type object_id: integer
:return: the object of the parsed xml object
:rtype: object
"""
return fields.ObjectField(name=cls.ENDPOINT, init_class=cls).decode(cls.element_from_string(cls._get_request(endpoint=cls.ENDPOINT + '/' + str(object_id)).text)) |
def ma_bias_ratio(self, date1, date2, data):
""" 計算乖離率(均價)
date1 - date2
:param int data1: n 日
:param int data2: m 日
:rtype: 序列 舊→新
"""
data1 = self.moving_average(data, date1)
data2 = self.moving_average(data, date2)
cal_list = []
for i in range(1, min(len(data1), len(data2)) + 1):
cal_list.append(data1[-i] - data2[-i])
cal_list.reverse()
return cal_list | def function[ma_bias_ratio, parameter[self, date1, date2, data]]:
constant[ 計算乖離率(均價)
date1 - date2
:param int data1: n 日
:param int data2: m 日
:rtype: 序列 舊→新
]
variable[data1] assign[=] call[name[self].moving_average, parameter[name[data], name[date1]]]
variable[data2] assign[=] call[name[self].moving_average, parameter[name[data], name[date2]]]
variable[cal_list] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], binary_operation[call[name[min], parameter[call[name[len], parameter[name[data1]]], call[name[len], parameter[name[data2]]]]] + constant[1]]]]] begin[:]
call[name[cal_list].append, parameter[binary_operation[call[name[data1]][<ast.UnaryOp object at 0x7da1b19cfd30>] - call[name[data2]][<ast.UnaryOp object at 0x7da1b19cf700>]]]]
call[name[cal_list].reverse, parameter[]]
return[name[cal_list]] | keyword[def] identifier[ma_bias_ratio] ( identifier[self] , identifier[date1] , identifier[date2] , identifier[data] ):
literal[string]
identifier[data1] = identifier[self] . identifier[moving_average] ( identifier[data] , identifier[date1] )
identifier[data2] = identifier[self] . identifier[moving_average] ( identifier[data] , identifier[date2] )
identifier[cal_list] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[min] ( identifier[len] ( identifier[data1] ), identifier[len] ( identifier[data2] ))+ literal[int] ):
identifier[cal_list] . identifier[append] ( identifier[data1] [- identifier[i] ]- identifier[data2] [- identifier[i] ])
identifier[cal_list] . identifier[reverse] ()
keyword[return] identifier[cal_list] | def ma_bias_ratio(self, date1, date2, data):
""" 計算乖離率(均價)
date1 - date2
:param int data1: n 日
:param int data2: m 日
:rtype: 序列 舊→新
"""
data1 = self.moving_average(data, date1)
data2 = self.moving_average(data, date2)
cal_list = []
for i in range(1, min(len(data1), len(data2)) + 1):
cal_list.append(data1[-i] - data2[-i]) # depends on [control=['for'], data=['i']]
cal_list.reverse()
return cal_list |
def parse_property(self, tup_tree):
"""
Parse PROPERTY into a CIMProperty object.
VAL is just the pcdata of the enclosed VALUE node.
::
<!ELEMENT PROPERTY (QUALIFIER*, VALUE?)>
<!ATTLIST PROPERTY
%CIMName;
%CIMType; #REQUIRED
%ClassOrigin;
%Propagated;
%EmbeddedObject;
xml:lang NMTOKEN #IMPLIED>
"""
self.check_node(tup_tree, 'PROPERTY', ('TYPE', 'NAME'),
('CLASSORIGIN', 'PROPAGATED', 'EmbeddedObject',
'EMBEDDEDOBJECT', 'xml:lang'),
('QUALIFIER', 'VALUE'))
# The 'xml:lang' attribute is tolerated but ignored.
attrl = attrs(tup_tree)
try:
val = self.unpack_value(tup_tree)
except ValueError as exc:
msg = str(exc)
raise CIMXMLParseError(
_format("Cannot parse content of 'VALUE' child element of "
"'PROPERTY' element with name {0!A}: {1}",
attrl['NAME'], msg),
conn_id=self.conn_id)
qualifiers = self.list_of_matching(tup_tree, ('QUALIFIER',))
embedded_object = False
if 'EmbeddedObject' in attrl or 'EMBEDDEDOBJECT' in attrl:
try:
embedded_object = attrl['EmbeddedObject']
except KeyError:
embedded_object = attrl['EMBEDDEDOBJECT']
if embedded_object:
val = self.parse_embeddedObject(val)
return CIMProperty(attrl['NAME'],
val,
type=attrl['TYPE'],
is_array=False,
class_origin=attrl.get('CLASSORIGIN', None),
propagated=self.unpack_boolean(
attrl.get('PROPAGATED', 'false')),
qualifiers=qualifiers,
embedded_object=embedded_object) | def function[parse_property, parameter[self, tup_tree]]:
constant[
Parse PROPERTY into a CIMProperty object.
VAL is just the pcdata of the enclosed VALUE node.
::
<!ELEMENT PROPERTY (QUALIFIER*, VALUE?)>
<!ATTLIST PROPERTY
%CIMName;
%CIMType; #REQUIRED
%ClassOrigin;
%Propagated;
%EmbeddedObject;
xml:lang NMTOKEN #IMPLIED>
]
call[name[self].check_node, parameter[name[tup_tree], constant[PROPERTY], tuple[[<ast.Constant object at 0x7da1b0b09f30>, <ast.Constant object at 0x7da1b0b09090>]], tuple[[<ast.Constant object at 0x7da1b0b09a20>, <ast.Constant object at 0x7da1b0b09bd0>, <ast.Constant object at 0x7da1b0b08ac0>, <ast.Constant object at 0x7da1b0b08f10>, <ast.Constant object at 0x7da1b0b0a0e0>]], tuple[[<ast.Constant object at 0x7da1b0b0a830>, <ast.Constant object at 0x7da1b0b09240>]]]]
variable[attrl] assign[=] call[name[attrs], parameter[name[tup_tree]]]
<ast.Try object at 0x7da1b0b098d0>
variable[qualifiers] assign[=] call[name[self].list_of_matching, parameter[name[tup_tree], tuple[[<ast.Constant object at 0x7da1b0b0ab90>]]]]
variable[embedded_object] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b0b0a950> begin[:]
<ast.Try object at 0x7da1b0b0ae30>
if name[embedded_object] begin[:]
variable[val] assign[=] call[name[self].parse_embeddedObject, parameter[name[val]]]
return[call[name[CIMProperty], parameter[call[name[attrl]][constant[NAME]], name[val]]]] | keyword[def] identifier[parse_property] ( identifier[self] , identifier[tup_tree] ):
literal[string]
identifier[self] . identifier[check_node] ( identifier[tup_tree] , literal[string] ,( literal[string] , literal[string] ),
( literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ),
( literal[string] , literal[string] ))
identifier[attrl] = identifier[attrs] ( identifier[tup_tree] )
keyword[try] :
identifier[val] = identifier[self] . identifier[unpack_value] ( identifier[tup_tree] )
keyword[except] identifier[ValueError] keyword[as] identifier[exc] :
identifier[msg] = identifier[str] ( identifier[exc] )
keyword[raise] identifier[CIMXMLParseError] (
identifier[_format] ( literal[string]
literal[string] ,
identifier[attrl] [ literal[string] ], identifier[msg] ),
identifier[conn_id] = identifier[self] . identifier[conn_id] )
identifier[qualifiers] = identifier[self] . identifier[list_of_matching] ( identifier[tup_tree] ,( literal[string] ,))
identifier[embedded_object] = keyword[False]
keyword[if] literal[string] keyword[in] identifier[attrl] keyword[or] literal[string] keyword[in] identifier[attrl] :
keyword[try] :
identifier[embedded_object] = identifier[attrl] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[embedded_object] = identifier[attrl] [ literal[string] ]
keyword[if] identifier[embedded_object] :
identifier[val] = identifier[self] . identifier[parse_embeddedObject] ( identifier[val] )
keyword[return] identifier[CIMProperty] ( identifier[attrl] [ literal[string] ],
identifier[val] ,
identifier[type] = identifier[attrl] [ literal[string] ],
identifier[is_array] = keyword[False] ,
identifier[class_origin] = identifier[attrl] . identifier[get] ( literal[string] , keyword[None] ),
identifier[propagated] = identifier[self] . identifier[unpack_boolean] (
identifier[attrl] . identifier[get] ( literal[string] , literal[string] )),
identifier[qualifiers] = identifier[qualifiers] ,
identifier[embedded_object] = identifier[embedded_object] ) | def parse_property(self, tup_tree):
"""
Parse PROPERTY into a CIMProperty object.
VAL is just the pcdata of the enclosed VALUE node.
::
<!ELEMENT PROPERTY (QUALIFIER*, VALUE?)>
<!ATTLIST PROPERTY
%CIMName;
%CIMType; #REQUIRED
%ClassOrigin;
%Propagated;
%EmbeddedObject;
xml:lang NMTOKEN #IMPLIED>
"""
self.check_node(tup_tree, 'PROPERTY', ('TYPE', 'NAME'), ('CLASSORIGIN', 'PROPAGATED', 'EmbeddedObject', 'EMBEDDEDOBJECT', 'xml:lang'), ('QUALIFIER', 'VALUE'))
# The 'xml:lang' attribute is tolerated but ignored.
attrl = attrs(tup_tree)
try:
val = self.unpack_value(tup_tree) # depends on [control=['try'], data=[]]
except ValueError as exc:
msg = str(exc)
raise CIMXMLParseError(_format("Cannot parse content of 'VALUE' child element of 'PROPERTY' element with name {0!A}: {1}", attrl['NAME'], msg), conn_id=self.conn_id) # depends on [control=['except'], data=['exc']]
qualifiers = self.list_of_matching(tup_tree, ('QUALIFIER',))
embedded_object = False
if 'EmbeddedObject' in attrl or 'EMBEDDEDOBJECT' in attrl:
try:
embedded_object = attrl['EmbeddedObject'] # depends on [control=['try'], data=[]]
except KeyError:
embedded_object = attrl['EMBEDDEDOBJECT'] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if embedded_object:
val = self.parse_embeddedObject(val) # depends on [control=['if'], data=[]]
return CIMProperty(attrl['NAME'], val, type=attrl['TYPE'], is_array=False, class_origin=attrl.get('CLASSORIGIN', None), propagated=self.unpack_boolean(attrl.get('PROPAGATED', 'false')), qualifiers=qualifiers, embedded_object=embedded_object) |
def adjustSize( self ):
"""
Adjusts the size of this node to support the length of its contents.
"""
cell = self.scene().cellWidth() * 2
minheight = cell
minwidth = 2 * cell
# fit to the grid size
metrics = QFontMetrics(QApplication.font())
width = metrics.width(self.displayName()) + 20
width = ((width/cell) * cell) + (cell % width)
height = self.rect().height()
# adjust for the icon
icon = self.icon()
if icon and not icon.isNull():
width += self.iconSize().width() + 2
height = max(height, self.iconSize().height() + 2)
w = max(width, minwidth)
h = max(height, minheight)
max_w = self.maximumWidth()
max_h = self.maximumHeight()
if max_w is not None:
w = min(w, max_w)
if max_h is not None:
h = min(h, max_h)
self.setMinimumWidth(w)
self.setMinimumHeight(h)
self.rebuild() | def function[adjustSize, parameter[self]]:
constant[
Adjusts the size of this node to support the length of its contents.
]
variable[cell] assign[=] binary_operation[call[call[name[self].scene, parameter[]].cellWidth, parameter[]] * constant[2]]
variable[minheight] assign[=] name[cell]
variable[minwidth] assign[=] binary_operation[constant[2] * name[cell]]
variable[metrics] assign[=] call[name[QFontMetrics], parameter[call[name[QApplication].font, parameter[]]]]
variable[width] assign[=] binary_operation[call[name[metrics].width, parameter[call[name[self].displayName, parameter[]]]] + constant[20]]
variable[width] assign[=] binary_operation[binary_operation[binary_operation[name[width] / name[cell]] * name[cell]] + binary_operation[name[cell] <ast.Mod object at 0x7da2590d6920> name[width]]]
variable[height] assign[=] call[call[name[self].rect, parameter[]].height, parameter[]]
variable[icon] assign[=] call[name[self].icon, parameter[]]
if <ast.BoolOp object at 0x7da2046210c0> begin[:]
<ast.AugAssign object at 0x7da204621000>
variable[height] assign[=] call[name[max], parameter[name[height], binary_operation[call[call[name[self].iconSize, parameter[]].height, parameter[]] + constant[2]]]]
variable[w] assign[=] call[name[max], parameter[name[width], name[minwidth]]]
variable[h] assign[=] call[name[max], parameter[name[height], name[minheight]]]
variable[max_w] assign[=] call[name[self].maximumWidth, parameter[]]
variable[max_h] assign[=] call[name[self].maximumHeight, parameter[]]
if compare[name[max_w] is_not constant[None]] begin[:]
variable[w] assign[=] call[name[min], parameter[name[w], name[max_w]]]
if compare[name[max_h] is_not constant[None]] begin[:]
variable[h] assign[=] call[name[min], parameter[name[h], name[max_h]]]
call[name[self].setMinimumWidth, parameter[name[w]]]
call[name[self].setMinimumHeight, parameter[name[h]]]
call[name[self].rebuild, parameter[]] | keyword[def] identifier[adjustSize] ( identifier[self] ):
literal[string]
identifier[cell] = identifier[self] . identifier[scene] (). identifier[cellWidth] ()* literal[int]
identifier[minheight] = identifier[cell]
identifier[minwidth] = literal[int] * identifier[cell]
identifier[metrics] = identifier[QFontMetrics] ( identifier[QApplication] . identifier[font] ())
identifier[width] = identifier[metrics] . identifier[width] ( identifier[self] . identifier[displayName] ())+ literal[int]
identifier[width] =(( identifier[width] / identifier[cell] )* identifier[cell] )+( identifier[cell] % identifier[width] )
identifier[height] = identifier[self] . identifier[rect] (). identifier[height] ()
identifier[icon] = identifier[self] . identifier[icon] ()
keyword[if] identifier[icon] keyword[and] keyword[not] identifier[icon] . identifier[isNull] ():
identifier[width] += identifier[self] . identifier[iconSize] (). identifier[width] ()+ literal[int]
identifier[height] = identifier[max] ( identifier[height] , identifier[self] . identifier[iconSize] (). identifier[height] ()+ literal[int] )
identifier[w] = identifier[max] ( identifier[width] , identifier[minwidth] )
identifier[h] = identifier[max] ( identifier[height] , identifier[minheight] )
identifier[max_w] = identifier[self] . identifier[maximumWidth] ()
identifier[max_h] = identifier[self] . identifier[maximumHeight] ()
keyword[if] identifier[max_w] keyword[is] keyword[not] keyword[None] :
identifier[w] = identifier[min] ( identifier[w] , identifier[max_w] )
keyword[if] identifier[max_h] keyword[is] keyword[not] keyword[None] :
identifier[h] = identifier[min] ( identifier[h] , identifier[max_h] )
identifier[self] . identifier[setMinimumWidth] ( identifier[w] )
identifier[self] . identifier[setMinimumHeight] ( identifier[h] )
identifier[self] . identifier[rebuild] () | def adjustSize(self):
"""
Adjusts the size of this node to support the length of its contents.
"""
cell = self.scene().cellWidth() * 2
minheight = cell
minwidth = 2 * cell
# fit to the grid size
metrics = QFontMetrics(QApplication.font())
width = metrics.width(self.displayName()) + 20
width = width / cell * cell + cell % width
height = self.rect().height()
# adjust for the icon
icon = self.icon()
if icon and (not icon.isNull()):
width += self.iconSize().width() + 2
height = max(height, self.iconSize().height() + 2) # depends on [control=['if'], data=[]]
w = max(width, minwidth)
h = max(height, minheight)
max_w = self.maximumWidth()
max_h = self.maximumHeight()
if max_w is not None:
w = min(w, max_w) # depends on [control=['if'], data=['max_w']]
if max_h is not None:
h = min(h, max_h) # depends on [control=['if'], data=['max_h']]
self.setMinimumWidth(w)
self.setMinimumHeight(h)
self.rebuild() |
def league_header(self, league):
"""Prints the league header"""
league_name = " {0} ".format(league)
click.secho("{:=^62}".format(league_name), fg=self.colors.MISC)
click.echo() | def function[league_header, parameter[self, league]]:
constant[Prints the league header]
variable[league_name] assign[=] call[constant[ {0} ].format, parameter[name[league]]]
call[name[click].secho, parameter[call[constant[{:=^62}].format, parameter[name[league_name]]]]]
call[name[click].echo, parameter[]] | keyword[def] identifier[league_header] ( identifier[self] , identifier[league] ):
literal[string]
identifier[league_name] = literal[string] . identifier[format] ( identifier[league] )
identifier[click] . identifier[secho] ( literal[string] . identifier[format] ( identifier[league_name] ), identifier[fg] = identifier[self] . identifier[colors] . identifier[MISC] )
identifier[click] . identifier[echo] () | def league_header(self, league):
"""Prints the league header"""
league_name = ' {0} '.format(league)
click.secho('{:=^62}'.format(league_name), fg=self.colors.MISC)
click.echo() |
def start_file(filename):
"""
Generalized os.startfile for all platforms supported by Qt
This function is simply wrapping QDesktopServices.openUrl
Returns True if successfull, otherwise returns False.
"""
from qtpy.QtCore import QUrl
from qtpy.QtGui import QDesktopServices
# We need to use setUrl instead of setPath because this is the only
# cross-platform way to open external files. setPath fails completely on
# Mac and doesn't open non-ascii files on Linux.
# Fixes Issue 740
url = QUrl()
url.setUrl(filename)
return QDesktopServices.openUrl(url) | def function[start_file, parameter[filename]]:
constant[
Generalized os.startfile for all platforms supported by Qt
This function is simply wrapping QDesktopServices.openUrl
Returns True if successfull, otherwise returns False.
]
from relative_module[qtpy.QtCore] import module[QUrl]
from relative_module[qtpy.QtGui] import module[QDesktopServices]
variable[url] assign[=] call[name[QUrl], parameter[]]
call[name[url].setUrl, parameter[name[filename]]]
return[call[name[QDesktopServices].openUrl, parameter[name[url]]]] | keyword[def] identifier[start_file] ( identifier[filename] ):
literal[string]
keyword[from] identifier[qtpy] . identifier[QtCore] keyword[import] identifier[QUrl]
keyword[from] identifier[qtpy] . identifier[QtGui] keyword[import] identifier[QDesktopServices]
identifier[url] = identifier[QUrl] ()
identifier[url] . identifier[setUrl] ( identifier[filename] )
keyword[return] identifier[QDesktopServices] . identifier[openUrl] ( identifier[url] ) | def start_file(filename):
"""
Generalized os.startfile for all platforms supported by Qt
This function is simply wrapping QDesktopServices.openUrl
Returns True if successfull, otherwise returns False.
"""
from qtpy.QtCore import QUrl
from qtpy.QtGui import QDesktopServices # We need to use setUrl instead of setPath because this is the only
# cross-platform way to open external files. setPath fails completely on
# Mac and doesn't open non-ascii files on Linux.
# Fixes Issue 740
url = QUrl()
url.setUrl(filename)
return QDesktopServices.openUrl(url) |
def getDataPath(_system=thisSystem, _FilePath=FilePath):
"""Gets an appropriate path for storing some local data, such as TLS
credentials.
If the path doesn't exist, it is created.
"""
if _system == "Windows":
pathName = "~/Crypto101/"
else:
pathName = "~/.crypto101/"
path = _FilePath(expanduser(pathName))
if not path.exists():
path.makedirs()
return path | def function[getDataPath, parameter[_system, _FilePath]]:
constant[Gets an appropriate path for storing some local data, such as TLS
credentials.
If the path doesn't exist, it is created.
]
if compare[name[_system] equal[==] constant[Windows]] begin[:]
variable[pathName] assign[=] constant[~/Crypto101/]
variable[path] assign[=] call[name[_FilePath], parameter[call[name[expanduser], parameter[name[pathName]]]]]
if <ast.UnaryOp object at 0x7da2054a5330> begin[:]
call[name[path].makedirs, parameter[]]
return[name[path]] | keyword[def] identifier[getDataPath] ( identifier[_system] = identifier[thisSystem] , identifier[_FilePath] = identifier[FilePath] ):
literal[string]
keyword[if] identifier[_system] == literal[string] :
identifier[pathName] = literal[string]
keyword[else] :
identifier[pathName] = literal[string]
identifier[path] = identifier[_FilePath] ( identifier[expanduser] ( identifier[pathName] ))
keyword[if] keyword[not] identifier[path] . identifier[exists] ():
identifier[path] . identifier[makedirs] ()
keyword[return] identifier[path] | def getDataPath(_system=thisSystem, _FilePath=FilePath):
"""Gets an appropriate path for storing some local data, such as TLS
credentials.
If the path doesn't exist, it is created.
"""
if _system == 'Windows':
pathName = '~/Crypto101/' # depends on [control=['if'], data=[]]
else:
pathName = '~/.crypto101/'
path = _FilePath(expanduser(pathName))
if not path.exists():
path.makedirs() # depends on [control=['if'], data=[]]
return path |
def get_bio(tweet):
"""
Get the bio text of the user who posted the Tweet
Args:
tweet (Tweet): A Tweet object (or a dictionary)
Returns:
str: the bio text of the user who posted the Tweet
In a payload the abscence of a bio seems to be represented by an
empty string or a None, this getter always returns a string (so, empty
string if no bio is available).
Example:
>>> from tweet_parser.getter_methods.tweet_user import get_bio
>>> original_format_dict = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "user":
... {"description": "Niche millenial content aggregator"}
... }
>>> get_bio(original_format_dict)
'Niche millenial content aggregator'
>>> activity_streams_format_dict = {
... "postedTime": "2017-05-24T20:17:19.000Z",
... "actor":
... {"summary": "Niche millenial content aggregator"}
... }
>>> get_bio(activity_streams_format_dict)
'Niche millenial content aggregator'
"""
if is_original_format(tweet):
bio_or_none = tweet["user"].get("description", "")
else:
bio_or_none = tweet["actor"].get("summary", "")
if bio_or_none is None:
return ""
else:
return bio_or_none | def function[get_bio, parameter[tweet]]:
constant[
Get the bio text of the user who posted the Tweet
Args:
tweet (Tweet): A Tweet object (or a dictionary)
Returns:
str: the bio text of the user who posted the Tweet
In a payload the abscence of a bio seems to be represented by an
empty string or a None, this getter always returns a string (so, empty
string if no bio is available).
Example:
>>> from tweet_parser.getter_methods.tweet_user import get_bio
>>> original_format_dict = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "user":
... {"description": "Niche millenial content aggregator"}
... }
>>> get_bio(original_format_dict)
'Niche millenial content aggregator'
>>> activity_streams_format_dict = {
... "postedTime": "2017-05-24T20:17:19.000Z",
... "actor":
... {"summary": "Niche millenial content aggregator"}
... }
>>> get_bio(activity_streams_format_dict)
'Niche millenial content aggregator'
]
if call[name[is_original_format], parameter[name[tweet]]] begin[:]
variable[bio_or_none] assign[=] call[call[name[tweet]][constant[user]].get, parameter[constant[description], constant[]]]
if compare[name[bio_or_none] is constant[None]] begin[:]
return[constant[]] | keyword[def] identifier[get_bio] ( identifier[tweet] ):
literal[string]
keyword[if] identifier[is_original_format] ( identifier[tweet] ):
identifier[bio_or_none] = identifier[tweet] [ literal[string] ]. identifier[get] ( literal[string] , literal[string] )
keyword[else] :
identifier[bio_or_none] = identifier[tweet] [ literal[string] ]. identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[bio_or_none] keyword[is] keyword[None] :
keyword[return] literal[string]
keyword[else] :
keyword[return] identifier[bio_or_none] | def get_bio(tweet):
"""
Get the bio text of the user who posted the Tweet
Args:
tweet (Tweet): A Tweet object (or a dictionary)
Returns:
str: the bio text of the user who posted the Tweet
In a payload the abscence of a bio seems to be represented by an
empty string or a None, this getter always returns a string (so, empty
string if no bio is available).
Example:
>>> from tweet_parser.getter_methods.tweet_user import get_bio
>>> original_format_dict = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "user":
... {"description": "Niche millenial content aggregator"}
... }
>>> get_bio(original_format_dict)
'Niche millenial content aggregator'
>>> activity_streams_format_dict = {
... "postedTime": "2017-05-24T20:17:19.000Z",
... "actor":
... {"summary": "Niche millenial content aggregator"}
... }
>>> get_bio(activity_streams_format_dict)
'Niche millenial content aggregator'
"""
if is_original_format(tweet):
bio_or_none = tweet['user'].get('description', '') # depends on [control=['if'], data=[]]
else:
bio_or_none = tweet['actor'].get('summary', '')
if bio_or_none is None:
return '' # depends on [control=['if'], data=[]]
else:
return bio_or_none |
def seedRandom(self, seed):
"""
C_SeedRandom
:param seed: seed material
:type seed: iterable
"""
low_seed = ckbytelist(seed)
rv = self.lib.C_SeedRandom(self.session, low_seed)
if rv != CKR_OK:
raise PyKCS11Error(rv) | def function[seedRandom, parameter[self, seed]]:
constant[
C_SeedRandom
:param seed: seed material
:type seed: iterable
]
variable[low_seed] assign[=] call[name[ckbytelist], parameter[name[seed]]]
variable[rv] assign[=] call[name[self].lib.C_SeedRandom, parameter[name[self].session, name[low_seed]]]
if compare[name[rv] not_equal[!=] name[CKR_OK]] begin[:]
<ast.Raise object at 0x7da2054a4940> | keyword[def] identifier[seedRandom] ( identifier[self] , identifier[seed] ):
literal[string]
identifier[low_seed] = identifier[ckbytelist] ( identifier[seed] )
identifier[rv] = identifier[self] . identifier[lib] . identifier[C_SeedRandom] ( identifier[self] . identifier[session] , identifier[low_seed] )
keyword[if] identifier[rv] != identifier[CKR_OK] :
keyword[raise] identifier[PyKCS11Error] ( identifier[rv] ) | def seedRandom(self, seed):
"""
C_SeedRandom
:param seed: seed material
:type seed: iterable
"""
low_seed = ckbytelist(seed)
rv = self.lib.C_SeedRandom(self.session, low_seed)
if rv != CKR_OK:
raise PyKCS11Error(rv) # depends on [control=['if'], data=['rv']] |
def is_spark_below_2_2():
"""
Check if spark version is below 2.2
"""
import pyspark
if(hasattr(pyspark,"version")):
full_version = pyspark.version.__version__
# We only need the general spark version (eg, 1.6, 2.2).
parts = full_version.split(".")
spark_version = parts[0] + "." + parts[1]
if(compare_version(spark_version, "2.2")>=0):
return False
return True | def function[is_spark_below_2_2, parameter[]]:
constant[
Check if spark version is below 2.2
]
import module[pyspark]
if call[name[hasattr], parameter[name[pyspark], constant[version]]] begin[:]
variable[full_version] assign[=] name[pyspark].version.__version__
variable[parts] assign[=] call[name[full_version].split, parameter[constant[.]]]
variable[spark_version] assign[=] binary_operation[binary_operation[call[name[parts]][constant[0]] + constant[.]] + call[name[parts]][constant[1]]]
if compare[call[name[compare_version], parameter[name[spark_version], constant[2.2]]] greater_or_equal[>=] constant[0]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_spark_below_2_2] ():
literal[string]
keyword[import] identifier[pyspark]
keyword[if] ( identifier[hasattr] ( identifier[pyspark] , literal[string] )):
identifier[full_version] = identifier[pyspark] . identifier[version] . identifier[__version__]
identifier[parts] = identifier[full_version] . identifier[split] ( literal[string] )
identifier[spark_version] = identifier[parts] [ literal[int] ]+ literal[string] + identifier[parts] [ literal[int] ]
keyword[if] ( identifier[compare_version] ( identifier[spark_version] , literal[string] )>= literal[int] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_spark_below_2_2():
"""
Check if spark version is below 2.2
"""
import pyspark
if hasattr(pyspark, 'version'):
full_version = pyspark.version.__version__
# We only need the general spark version (eg, 1.6, 2.2).
parts = full_version.split('.')
spark_version = parts[0] + '.' + parts[1]
if compare_version(spark_version, '2.2') >= 0:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return True |
def _create_multi_exposure_action(self):
"""Create action for showing the multi exposure tool."""
self.action_multi_exposure = QAction(
QIcon(resources_path('img', 'icons', 'show-multi-exposure.svg')),
self.tr('InaSAFE Multi Exposure Tool'),
self.iface.mainWindow())
self.action_multi_exposure.setStatusTip(self.tr(
'Open the multi exposure tool.'))
self.action_multi_exposure.setWhatsThis(self.tr(
'Open the multi exposure tool.'))
self.action_multi_exposure.setEnabled(True)
self.action_multi_exposure.triggered.connect(self.show_multi_exposure)
self.add_action(
self.action_multi_exposure, add_to_toolbar=self.full_toolbar) | def function[_create_multi_exposure_action, parameter[self]]:
constant[Create action for showing the multi exposure tool.]
name[self].action_multi_exposure assign[=] call[name[QAction], parameter[call[name[QIcon], parameter[call[name[resources_path], parameter[constant[img], constant[icons], constant[show-multi-exposure.svg]]]]], call[name[self].tr, parameter[constant[InaSAFE Multi Exposure Tool]]], call[name[self].iface.mainWindow, parameter[]]]]
call[name[self].action_multi_exposure.setStatusTip, parameter[call[name[self].tr, parameter[constant[Open the multi exposure tool.]]]]]
call[name[self].action_multi_exposure.setWhatsThis, parameter[call[name[self].tr, parameter[constant[Open the multi exposure tool.]]]]]
call[name[self].action_multi_exposure.setEnabled, parameter[constant[True]]]
call[name[self].action_multi_exposure.triggered.connect, parameter[name[self].show_multi_exposure]]
call[name[self].add_action, parameter[name[self].action_multi_exposure]] | keyword[def] identifier[_create_multi_exposure_action] ( identifier[self] ):
literal[string]
identifier[self] . identifier[action_multi_exposure] = identifier[QAction] (
identifier[QIcon] ( identifier[resources_path] ( literal[string] , literal[string] , literal[string] )),
identifier[self] . identifier[tr] ( literal[string] ),
identifier[self] . identifier[iface] . identifier[mainWindow] ())
identifier[self] . identifier[action_multi_exposure] . identifier[setStatusTip] ( identifier[self] . identifier[tr] (
literal[string] ))
identifier[self] . identifier[action_multi_exposure] . identifier[setWhatsThis] ( identifier[self] . identifier[tr] (
literal[string] ))
identifier[self] . identifier[action_multi_exposure] . identifier[setEnabled] ( keyword[True] )
identifier[self] . identifier[action_multi_exposure] . identifier[triggered] . identifier[connect] ( identifier[self] . identifier[show_multi_exposure] )
identifier[self] . identifier[add_action] (
identifier[self] . identifier[action_multi_exposure] , identifier[add_to_toolbar] = identifier[self] . identifier[full_toolbar] ) | def _create_multi_exposure_action(self):
"""Create action for showing the multi exposure tool."""
self.action_multi_exposure = QAction(QIcon(resources_path('img', 'icons', 'show-multi-exposure.svg')), self.tr('InaSAFE Multi Exposure Tool'), self.iface.mainWindow())
self.action_multi_exposure.setStatusTip(self.tr('Open the multi exposure tool.'))
self.action_multi_exposure.setWhatsThis(self.tr('Open the multi exposure tool.'))
self.action_multi_exposure.setEnabled(True)
self.action_multi_exposure.triggered.connect(self.show_multi_exposure)
self.add_action(self.action_multi_exposure, add_to_toolbar=self.full_toolbar) |
def reset(resources, *args, **kwargs):
""" Remove dispensers and indicators for idle resources. """
test = kwargs.pop('test', False)
client = redis.Redis(decode_responses=True, **kwargs)
resources = resources if resources else find_resources(client)
for resource in resources:
# investigate sequences
queue = Queue(client=client, resource=resource)
values = client.mget(queue.keys.indicator, queue.keys.dispenser)
try:
indicator, dispenser = map(int, values)
except TypeError:
print('No such queue: "{}".'.format(resource))
continue
# do a bump if there appears to be a queue
if dispenser - indicator + 1:
queue.message('Reset tool bumps.')
indicator = queue.bump()
# do not reset when there is still a queue
size = dispenser - indicator + 1
if size:
print('"{}" is in use by {} user(s).'.format(resource, size))
continue
# reset, except when someone is incoming
with client.pipeline() as pipe:
try:
pipe.watch(queue.keys.dispenser)
if test:
time.sleep(0.02)
pipe.multi()
pipe.delete(queue.keys.dispenser, queue.keys.indicator)
pipe.execute()
except redis.WatchError:
print('Activity detected for "{}".'.format(resource)) | def function[reset, parameter[resources]]:
constant[ Remove dispensers and indicators for idle resources. ]
variable[test] assign[=] call[name[kwargs].pop, parameter[constant[test], constant[False]]]
variable[client] assign[=] call[name[redis].Redis, parameter[]]
variable[resources] assign[=] <ast.IfExp object at 0x7da1b26adc60>
for taget[name[resource]] in starred[name[resources]] begin[:]
variable[queue] assign[=] call[name[Queue], parameter[]]
variable[values] assign[=] call[name[client].mget, parameter[name[queue].keys.indicator, name[queue].keys.dispenser]]
<ast.Try object at 0x7da1b26afdc0>
if binary_operation[binary_operation[name[dispenser] - name[indicator]] + constant[1]] begin[:]
call[name[queue].message, parameter[constant[Reset tool bumps.]]]
variable[indicator] assign[=] call[name[queue].bump, parameter[]]
variable[size] assign[=] binary_operation[binary_operation[name[dispenser] - name[indicator]] + constant[1]]
if name[size] begin[:]
call[name[print], parameter[call[constant["{}" is in use by {} user(s).].format, parameter[name[resource], name[size]]]]]
continue
with call[name[client].pipeline, parameter[]] begin[:]
<ast.Try object at 0x7da1b26af5b0> | keyword[def] identifier[reset] ( identifier[resources] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[test] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[client] = identifier[redis] . identifier[Redis] ( identifier[decode_responses] = keyword[True] ,** identifier[kwargs] )
identifier[resources] = identifier[resources] keyword[if] identifier[resources] keyword[else] identifier[find_resources] ( identifier[client] )
keyword[for] identifier[resource] keyword[in] identifier[resources] :
identifier[queue] = identifier[Queue] ( identifier[client] = identifier[client] , identifier[resource] = identifier[resource] )
identifier[values] = identifier[client] . identifier[mget] ( identifier[queue] . identifier[keys] . identifier[indicator] , identifier[queue] . identifier[keys] . identifier[dispenser] )
keyword[try] :
identifier[indicator] , identifier[dispenser] = identifier[map] ( identifier[int] , identifier[values] )
keyword[except] identifier[TypeError] :
identifier[print] ( literal[string] . identifier[format] ( identifier[resource] ))
keyword[continue]
keyword[if] identifier[dispenser] - identifier[indicator] + literal[int] :
identifier[queue] . identifier[message] ( literal[string] )
identifier[indicator] = identifier[queue] . identifier[bump] ()
identifier[size] = identifier[dispenser] - identifier[indicator] + literal[int]
keyword[if] identifier[size] :
identifier[print] ( literal[string] . identifier[format] ( identifier[resource] , identifier[size] ))
keyword[continue]
keyword[with] identifier[client] . identifier[pipeline] () keyword[as] identifier[pipe] :
keyword[try] :
identifier[pipe] . identifier[watch] ( identifier[queue] . identifier[keys] . identifier[dispenser] )
keyword[if] identifier[test] :
identifier[time] . identifier[sleep] ( literal[int] )
identifier[pipe] . identifier[multi] ()
identifier[pipe] . identifier[delete] ( identifier[queue] . identifier[keys] . identifier[dispenser] , identifier[queue] . identifier[keys] . identifier[indicator] )
identifier[pipe] . identifier[execute] ()
keyword[except] identifier[redis] . identifier[WatchError] :
identifier[print] ( literal[string] . identifier[format] ( identifier[resource] )) | def reset(resources, *args, **kwargs):
""" Remove dispensers and indicators for idle resources. """
test = kwargs.pop('test', False)
client = redis.Redis(decode_responses=True, **kwargs)
resources = resources if resources else find_resources(client)
for resource in resources:
# investigate sequences
queue = Queue(client=client, resource=resource)
values = client.mget(queue.keys.indicator, queue.keys.dispenser)
try:
(indicator, dispenser) = map(int, values) # depends on [control=['try'], data=[]]
except TypeError:
print('No such queue: "{}".'.format(resource))
continue # depends on [control=['except'], data=[]]
# do a bump if there appears to be a queue
if dispenser - indicator + 1:
queue.message('Reset tool bumps.')
indicator = queue.bump() # depends on [control=['if'], data=[]]
# do not reset when there is still a queue
size = dispenser - indicator + 1
if size:
print('"{}" is in use by {} user(s).'.format(resource, size))
continue # depends on [control=['if'], data=[]]
# reset, except when someone is incoming
with client.pipeline() as pipe:
try:
pipe.watch(queue.keys.dispenser)
if test:
time.sleep(0.02) # depends on [control=['if'], data=[]]
pipe.multi()
pipe.delete(queue.keys.dispenser, queue.keys.indicator)
pipe.execute() # depends on [control=['try'], data=[]]
except redis.WatchError:
print('Activity detected for "{}".'.format(resource)) # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['pipe']] # depends on [control=['for'], data=['resource']] |
def write_hash_file(fpath, hash_tag='md5', recompute=False):
r""" Creates a hash file for each file in a path
CommandLine:
python -m utool.util_hash --test-write_hash_file
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> from utool.util_hash import * # NOQA
>>> fpath = ut.grab_test_imgpath('patsy.jpg')
>>> write_hash_file(fpath, 'md5')
"""
hash_dict = {
'md5' : hashlib.md5(),
'sha1' : hashlib.sha1(),
'sha256' : hashlib.sha256(),
}
message = "Unrecognized hashing function. Use 'md5', 'sha1', or 'sha256"
assert hash_tag in hash_dict, message
if fpath.endswith('.%s' % (hash_tag, )):
# No need to compute hashes on hashes
return
# Get hash path
hash_fpath = '%s.%s' % (fpath, hash_tag, )
if os.path.exists(hash_fpath) and not recompute:
return
# Assert this is a file
file_type = util_path.get_path_type(fpath)
if file_type == 'file':
# Compute hash
hasher = hash_dict[hash_tag]
hash_local = get_file_hash(fpath, hasher=hasher, hexdigest=True)
print('[utool] Adding:', fpath, hash_local)
with open(hash_fpath, 'w') as hash_file:
hash_file.write(hash_local)
return hash_fpath | def function[write_hash_file, parameter[fpath, hash_tag, recompute]]:
constant[ Creates a hash file for each file in a path
CommandLine:
python -m utool.util_hash --test-write_hash_file
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> from utool.util_hash import * # NOQA
>>> fpath = ut.grab_test_imgpath('patsy.jpg')
>>> write_hash_file(fpath, 'md5')
]
variable[hash_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b253bc70>, <ast.Constant object at 0x7da1b253a6b0>, <ast.Constant object at 0x7da1b253bf10>], [<ast.Call object at 0x7da1b253b8b0>, <ast.Call object at 0x7da1b253ae90>, <ast.Call object at 0x7da1b253b7f0>]]
variable[message] assign[=] constant[Unrecognized hashing function. Use 'md5', 'sha1', or 'sha256]
assert[compare[name[hash_tag] in name[hash_dict]]]
if call[name[fpath].endswith, parameter[binary_operation[constant[.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b253b310>]]]]] begin[:]
return[None]
variable[hash_fpath] assign[=] binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b24affd0>, <ast.Name object at 0x7da1b24afdf0>]]]
if <ast.BoolOp object at 0x7da1b24adcf0> begin[:]
return[None]
variable[file_type] assign[=] call[name[util_path].get_path_type, parameter[name[fpath]]]
if compare[name[file_type] equal[==] constant[file]] begin[:]
variable[hasher] assign[=] call[name[hash_dict]][name[hash_tag]]
variable[hash_local] assign[=] call[name[get_file_hash], parameter[name[fpath]]]
call[name[print], parameter[constant[[utool] Adding:], name[fpath], name[hash_local]]]
with call[name[open], parameter[name[hash_fpath], constant[w]]] begin[:]
call[name[hash_file].write, parameter[name[hash_local]]]
return[name[hash_fpath]] | keyword[def] identifier[write_hash_file] ( identifier[fpath] , identifier[hash_tag] = literal[string] , identifier[recompute] = keyword[False] ):
literal[string]
identifier[hash_dict] ={
literal[string] : identifier[hashlib] . identifier[md5] (),
literal[string] : identifier[hashlib] . identifier[sha1] (),
literal[string] : identifier[hashlib] . identifier[sha256] (),
}
identifier[message] = literal[string]
keyword[assert] identifier[hash_tag] keyword[in] identifier[hash_dict] , identifier[message]
keyword[if] identifier[fpath] . identifier[endswith] ( literal[string] %( identifier[hash_tag] ,)):
keyword[return]
identifier[hash_fpath] = literal[string] %( identifier[fpath] , identifier[hash_tag] ,)
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[hash_fpath] ) keyword[and] keyword[not] identifier[recompute] :
keyword[return]
identifier[file_type] = identifier[util_path] . identifier[get_path_type] ( identifier[fpath] )
keyword[if] identifier[file_type] == literal[string] :
identifier[hasher] = identifier[hash_dict] [ identifier[hash_tag] ]
identifier[hash_local] = identifier[get_file_hash] ( identifier[fpath] , identifier[hasher] = identifier[hasher] , identifier[hexdigest] = keyword[True] )
identifier[print] ( literal[string] , identifier[fpath] , identifier[hash_local] )
keyword[with] identifier[open] ( identifier[hash_fpath] , literal[string] ) keyword[as] identifier[hash_file] :
identifier[hash_file] . identifier[write] ( identifier[hash_local] )
keyword[return] identifier[hash_fpath] | def write_hash_file(fpath, hash_tag='md5', recompute=False):
""" Creates a hash file for each file in a path
CommandLine:
python -m utool.util_hash --test-write_hash_file
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> from utool.util_hash import * # NOQA
>>> fpath = ut.grab_test_imgpath('patsy.jpg')
>>> write_hash_file(fpath, 'md5')
"""
hash_dict = {'md5': hashlib.md5(), 'sha1': hashlib.sha1(), 'sha256': hashlib.sha256()}
message = "Unrecognized hashing function. Use 'md5', 'sha1', or 'sha256"
assert hash_tag in hash_dict, message
if fpath.endswith('.%s' % (hash_tag,)):
# No need to compute hashes on hashes
return # depends on [control=['if'], data=[]]
# Get hash path
hash_fpath = '%s.%s' % (fpath, hash_tag)
if os.path.exists(hash_fpath) and (not recompute):
return # depends on [control=['if'], data=[]]
# Assert this is a file
file_type = util_path.get_path_type(fpath)
if file_type == 'file':
# Compute hash
hasher = hash_dict[hash_tag]
hash_local = get_file_hash(fpath, hasher=hasher, hexdigest=True)
print('[utool] Adding:', fpath, hash_local)
with open(hash_fpath, 'w') as hash_file:
hash_file.write(hash_local) # depends on [control=['with'], data=['hash_file']]
return hash_fpath # depends on [control=['if'], data=[]] |
def timestamp(val):
    """
    Turn the generated-plot time stamp on or off.

    When turned on, a time stamp (the time at which the call was made)
    is stored in ``extra_layouts`` and shows up at the bottom of every
    generated plot.

    Parameters
    ----------
    val : str
        Either ``'on'`` or ``'off'``.  Any value other than ``'on'``
        turns the stamp off.

    Returns
    -------
    None

    Examples
    --------
    >>> import pytplot
    >>> pytplot.timestamp('on')
    """
    # Compare strings with ``==``: the original ``val is 'on'`` relied on
    # CPython string interning and raises a SyntaxWarning on modern
    # interpreters; it could silently fail for non-interned strings.
    if val == 'on':
        todaystring = datetime.datetime.now().strftime('%Y-%m-%d %H%M%S')
        extra_layouts['time_stamp'] = todaystring
    elif 'time_stamp' in extra_layouts:
        # Anything other than 'on' removes a previously-set stamp.
        del extra_layouts['time_stamp']
    return
constant[
This function will turn on a time stamp that shows up at the bottom of every generated plot.
Parameters
val str
A string that can either be 'on' or 'off'.
Returns
None
Examples
# Turn on the timestamp
import pytplot
pytplot.timestamp('on')
]
if compare[name[val] is constant[on]] begin[:]
variable[todaystring] assign[=] call[call[name[datetime].datetime.now, parameter[]].strftime, parameter[constant[%Y-%m-%d %H%M%S]]]
call[name[extra_layouts]][constant[time_stamp]] assign[=] name[todaystring]
return[None] | keyword[def] identifier[timestamp] ( identifier[val] ):
literal[string]
keyword[if] identifier[val] keyword[is] literal[string] :
identifier[todaystring] = identifier[datetime] . identifier[datetime] . identifier[now] (). identifier[strftime] ( literal[string] )
identifier[extra_layouts] [ literal[string] ]= identifier[todaystring]
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[extra_layouts] :
keyword[del] identifier[extra_layouts] [ literal[string] ]
keyword[return] | def timestamp(val):
"""
This function will turn on a time stamp that shows up at the bottom of every generated plot.
Parameters
val str
A string that can either be 'on' or 'off'.
Returns
None
Examples
# Turn on the timestamp
import pytplot
pytplot.timestamp('on')
"""
if val is 'on':
todaystring = datetime.datetime.now().strftime('%Y-%m-%d %H%M%S')
extra_layouts['time_stamp'] = todaystring # depends on [control=['if'], data=[]]
elif 'time_stamp' in extra_layouts:
del extra_layouts['time_stamp'] # depends on [control=['if'], data=['extra_layouts']]
return |
def get_asset_contents_by_ids(self, asset_content_ids):
    """Gets an ``AssetList`` corresponding to the given ``IdList``.
    In plenary mode, the returned list contains all of the asset contents
    specified in the ``Id`` list, in the order of the list,
    including duplicates, or an error results if an ``Id`` in the
    supplied list is not found or inaccessible. Otherwise,
    inaccessible ``AssetContnts`` may be omitted from the list and may
    present the elements in any order including returning a unique
    set.
    :param asset_content_ids: the list of ``Ids`` to retrieve
    :type asset_content_ids: ``osid.id.IdList``
    :return: the returned ``AssetContent list``
    :rtype: ``osid.repository.AssetContentList``
    :raise: ``NotFound`` -- an ``Id`` was not found
    :raise: ``NullArgument`` -- ``asset_ids`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('repository',
                                     collection='Asset',
                                     runtime=self._runtime)
    # Translate each incoming Id into the Mongo ObjectId stored on the
    # asset's embedded content documents.
    wanted_ids = [ObjectId(self._get_id(content_id, 'repository').get_identifier())
                  for content_id in asset_content_ids]
    query = dict({'assetContents._id': {'$in': wanted_ids}},
                 **self._view_filter())
    matching_assets = collection.find(query)
    # If a match is not found, a NotFound exception is raised by the
    # collection lookup, so the nested scan below should always work.
    asset_content_maps = []
    for asset_map in matching_assets:
        for content_map in asset_map['assetContents']:
            for wanted in wanted_ids:
                if content_map['_id'] == wanted:
                    asset_content_maps.append(content_map)
    return objects.AssetContentList(asset_content_maps, runtime=self._runtime, proxy=self._proxy)
constant[Gets an ``AssetList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the asset contents
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible ``AssetContnts`` may be omitted from the list and may
present the elements in any order including returning a unique
set.
:param asset_content_ids: the list of ``Ids`` to retrieve
:type asset_content_ids: ``osid.id.IdList``
:return: the returned ``AssetContent list``
:rtype: ``osid.repository.AssetContentList``
:raise: ``NotFound`` -- an ``Id`` was not found
:raise: ``NullArgument`` -- ``asset_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
]
variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[repository]]]
variable[object_id_list] assign[=] <ast.ListComp object at 0x7da1b26ae8c0>
variable[results] assign[=] call[name[collection].find, parameter[call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da1b26aca30>], [<ast.Dict object at 0x7da1b26ac370>]]]]]]
variable[asset_content_maps] assign[=] <ast.ListComp object at 0x7da1b26ae260>
return[call[name[objects].AssetContentList, parameter[name[asset_content_maps]]]] | keyword[def] identifier[get_asset_contents_by_ids] ( identifier[self] , identifier[asset_content_ids] ):
literal[string]
identifier[collection] = identifier[JSONClientValidated] ( literal[string] ,
identifier[collection] = literal[string] ,
identifier[runtime] = identifier[self] . identifier[_runtime] )
identifier[object_id_list] =[ identifier[ObjectId] ( identifier[self] . identifier[_get_id] ( identifier[i] , literal[string] ). identifier[get_identifier] ()) keyword[for] identifier[i] keyword[in] identifier[asset_content_ids] ]
identifier[results] = identifier[collection] . identifier[find] (
identifier[dict] ({ literal[string] :{ literal[string] : identifier[object_id_list] }},
** identifier[self] . identifier[_view_filter] ()))
identifier[asset_content_maps] =[ identifier[ac]
keyword[for] identifier[asset] keyword[in] identifier[results]
keyword[for] identifier[ac] keyword[in] identifier[asset] [ literal[string] ]
keyword[for] identifier[object_id] keyword[in] identifier[object_id_list]
keyword[if] identifier[ac] [ literal[string] ]== identifier[object_id] ]
keyword[return] identifier[objects] . identifier[AssetContentList] ( identifier[asset_content_maps] , identifier[runtime] = identifier[self] . identifier[_runtime] , identifier[proxy] = identifier[self] . identifier[_proxy] ) | def get_asset_contents_by_ids(self, asset_content_ids):
"""Gets an ``AssetList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the asset contents
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible ``AssetContnts`` may be omitted from the list and may
present the elements in any order including returning a unique
set.
:param asset_content_ids: the list of ``Ids`` to retrieve
:type asset_content_ids: ``osid.id.IdList``
:return: the returned ``AssetContent list``
:rtype: ``osid.repository.AssetContentList``
:raise: ``NotFound`` -- an ``Id`` was not found
:raise: ``NullArgument`` -- ``asset_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
collection = JSONClientValidated('repository', collection='Asset', runtime=self._runtime)
object_id_list = [ObjectId(self._get_id(i, 'repository').get_identifier()) for i in asset_content_ids]
results = collection.find(dict({'assetContents._id': {'$in': object_id_list}}, **self._view_filter()))
# if a match is not found, NotFound exception will be thrown by find_one, so
# the below should always work
asset_content_maps = [ac for asset in results for ac in asset['assetContents'] for object_id in object_id_list if ac['_id'] == object_id]
return objects.AssetContentList(asset_content_maps, runtime=self._runtime, proxy=self._proxy) |
def generate_output_prov(self,
                         final_output,  # type: Dict[Text, Any]
                         process_run_id,  # type: Optional[str]
                         name  # type: Optional[Text]
                        ):  # type: (...) -> None
    """Call wasGeneratedBy() for each output, copy the files into the RO.

    :param final_output: mapping of output name -> produced value
    :param process_run_id: provenance id of the generating activity;
        falls back to ``self.workflow_run_uri`` when falsy
    :param name: step name used to build the output's role URI, or None
        for workflow-level outputs
    """
    # Timestamp should be created at the earliest
    timestamp = datetime.datetime.now()
    # Quote the step name exactly once, outside the loop.  The original
    # re-quoted ``name`` on every iteration, so from the second output
    # onward '%' signs from the first quoting were escaped again
    # (e.g. 'a b' -> 'a%20b' -> 'a%2520b').
    if name is not None:
        name = urllib.parse.quote(str(name), safe=":/,#")
    # For each output, find/register the corresponding
    # entity (UUID) and document it as generated in
    # a role corresponding to the output
    for output, value in final_output.items():
        entity = self.declare_artefact(value)
        if name is not None:
            # FIXME: Probably not "main" in nested workflows
            role = self.wf_ns["main/%s/%s" % (name, output)]
        else:
            role = self.wf_ns["main/%s" % output]
        if not process_run_id:
            process_run_id = self.workflow_run_uri
        self.document.wasGeneratedBy(
            entity, process_run_id, timestamp, None, {"prov:role": role})
constant[Call wasGeneratedBy() for each output,copy the files into the RO.]
variable[timestamp] assign[=] call[name[datetime].datetime.now, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18dc05690>, <ast.Name object at 0x7da18dc06e60>]]] in starred[call[name[final_output].items, parameter[]]] begin[:]
variable[entity] assign[=] call[name[self].declare_artefact, parameter[name[value]]]
if compare[name[name] is_not constant[None]] begin[:]
variable[name] assign[=] call[name[urllib].parse.quote, parameter[call[name[str], parameter[name[name]]]]]
variable[role] assign[=] call[name[self].wf_ns][binary_operation[constant[main/%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18dc05f30>, <ast.Name object at 0x7da18dc06aa0>]]]]
if <ast.UnaryOp object at 0x7da18dc04940> begin[:]
variable[process_run_id] assign[=] name[self].workflow_run_uri
call[name[self].document.wasGeneratedBy, parameter[name[entity], name[process_run_id], name[timestamp], constant[None], dictionary[[<ast.Constant object at 0x7da18dc04070>], [<ast.Name object at 0x7da18dc07b20>]]]] | keyword[def] identifier[generate_output_prov] ( identifier[self] ,
identifier[final_output] ,
identifier[process_run_id] ,
identifier[name]
):
literal[string]
identifier[timestamp] = identifier[datetime] . identifier[datetime] . identifier[now] ()
keyword[for] identifier[output] , identifier[value] keyword[in] identifier[final_output] . identifier[items] ():
identifier[entity] = identifier[self] . identifier[declare_artefact] ( identifier[value] )
keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[name] = identifier[urllib] . identifier[parse] . identifier[quote] ( identifier[str] ( identifier[name] ), identifier[safe] = literal[string] )
identifier[role] = identifier[self] . identifier[wf_ns] [ literal[string] %( identifier[name] , identifier[output] )]
keyword[else] :
identifier[role] = identifier[self] . identifier[wf_ns] [ literal[string] % identifier[output] ]
keyword[if] keyword[not] identifier[process_run_id] :
identifier[process_run_id] = identifier[self] . identifier[workflow_run_uri]
identifier[self] . identifier[document] . identifier[wasGeneratedBy] (
identifier[entity] , identifier[process_run_id] , identifier[timestamp] , keyword[None] ,{ literal[string] : identifier[role] }) | def generate_output_prov(self, final_output, process_run_id, name): # type: Dict[Text, Any]
# type: Optional[str]
# type: Optional[Text]
# type: (...) -> None
'Call wasGeneratedBy() for each output,copy the files into the RO.'
# Timestamp should be created at the earliest
timestamp = datetime.datetime.now()
# For each output, find/register the corresponding
# entity (UUID) and document it as generated in
# a role corresponding to the output
for (output, value) in final_output.items():
entity = self.declare_artefact(value)
if name is not None:
name = urllib.parse.quote(str(name), safe=':/,#')
# FIXME: Probably not "main" in nested workflows
role = self.wf_ns['main/%s/%s' % (name, output)] # depends on [control=['if'], data=['name']]
else:
role = self.wf_ns['main/%s' % output]
if not process_run_id:
process_run_id = self.workflow_run_uri # depends on [control=['if'], data=[]]
self.document.wasGeneratedBy(entity, process_run_id, timestamp, None, {'prov:role': role}) # depends on [control=['for'], data=[]] |
def init():
    '''
    Load in the Chinese-English dictionary. This takes 1-2 seconds. It
    is done when the other functions are used, but this is public since
    preloading sometimes makes sense.
    '''
    global dictionaries, trees
    dictionaries = {
        'traditional': {},
        'simplified': {}
    }
    trees = {
        'traditional': Tree(),
        'simplified': Tree()
    }
    # CEDICT line format: TRAD SIMP [pinyin] /meaning 1/meaning 2/
    # (raw string avoids the invalid-escape warning of the original
    # pattern; the compiled regex is byte-identical).
    exp = re.compile(r"^([^ ]+) ([^ ]+) \[(.*)\] /(.+)/")
    # Use a context manager so the gzip handle is always closed;
    # the original left the file object open (resource leak).
    with gzip.open(
            os.path.join(os.path.dirname(__file__), "cedict.txt.gz"),
            mode='rt',
            encoding='utf-8') as lines:
        parsed_lines = (exp.match(line).groups()
                        for line in lines
                        if line[0] != '#')  # skip comment lines
        for traditional, simplified, pinyin, meaning in parsed_lines:
            meaning = meaning.split('/')
            dictionaries['traditional'][traditional] = meaning
            dictionaries['simplified'][simplified] = meaning
            _add_to_tree(trees['traditional'], traditional, meaning)
            _add_to_tree(trees['simplified'], simplified, meaning)
constant[
Load in the Chinese-English dictionary. This takes 1-2 seconds. It
is done when the other functions are used, but this is public since
preloading sometimes makes sense.
]
<ast.Global object at 0x7da1b26ad600>
variable[dictionaries] assign[=] dictionary[[<ast.Constant object at 0x7da1b26acf70>, <ast.Constant object at 0x7da1b26afd90>], [<ast.Dict object at 0x7da1b26aece0>, <ast.Dict object at 0x7da1b26ada20>]]
variable[trees] assign[=] dictionary[[<ast.Constant object at 0x7da1b26acfd0>, <ast.Constant object at 0x7da1b26aec20>], [<ast.Call object at 0x7da1b26afe80>, <ast.Call object at 0x7da1b26ad000>]]
variable[lines] assign[=] call[name[gzip].open, parameter[call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[__file__]]], constant[cedict.txt.gz]]]]]
variable[exp] assign[=] call[name[re].compile, parameter[constant[^([^ ]+) ([^ ]+) \[(.*)\] /(.+)/]]]
variable[parsed_lines] assign[=] <ast.GeneratorExp object at 0x7da1b26ac820>
for taget[tuple[[<ast.Name object at 0x7da1b26ae110>, <ast.Name object at 0x7da1b26ac310>, <ast.Name object at 0x7da1b26ad4b0>, <ast.Name object at 0x7da1b26aefb0>]]] in starred[name[parsed_lines]] begin[:]
variable[meaning] assign[=] call[name[meaning].split, parameter[constant[/]]]
call[call[name[dictionaries]][constant[traditional]]][name[traditional]] assign[=] name[meaning]
call[call[name[dictionaries]][constant[simplified]]][name[simplified]] assign[=] name[meaning]
call[name[_add_to_tree], parameter[call[name[trees]][constant[traditional]], name[traditional], name[meaning]]]
call[name[_add_to_tree], parameter[call[name[trees]][constant[simplified]], name[simplified], name[meaning]]] | keyword[def] identifier[init] ():
literal[string]
keyword[global] identifier[dictionaries] , identifier[trees]
identifier[dictionaries] ={
literal[string] :{},
literal[string] :{}
}
identifier[trees] ={
literal[string] : identifier[Tree] (),
literal[string] : identifier[Tree] ()
}
identifier[lines] = identifier[gzip] . identifier[open] (
identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ), literal[string] ),
identifier[mode] = literal[string] ,
identifier[encoding] = literal[string]
)
identifier[exp] = identifier[re] . identifier[compile] ( literal[string] )
identifier[parsed_lines] =( identifier[exp] . identifier[match] ( identifier[line] ). identifier[groups] ()
keyword[for] identifier[line] keyword[in] identifier[lines]
keyword[if] identifier[line] [ literal[int] ]!= literal[string] )
keyword[for] identifier[traditional] , identifier[simplified] , identifier[pinyin] , identifier[meaning] keyword[in] identifier[parsed_lines] :
identifier[meaning] = identifier[meaning] . identifier[split] ( literal[string] )
identifier[dictionaries] [ literal[string] ][ identifier[traditional] ]= identifier[meaning]
identifier[dictionaries] [ literal[string] ][ identifier[simplified] ]= identifier[meaning]
identifier[_add_to_tree] ( identifier[trees] [ literal[string] ], identifier[traditional] , identifier[meaning] )
identifier[_add_to_tree] ( identifier[trees] [ literal[string] ], identifier[simplified] , identifier[meaning] ) | def init():
"""
Load in the Chinese-English dictionary. This takes 1-2 seconds. It
is done when the other functions are used, but this is public since
preloading sometimes makes sense.
"""
global dictionaries, trees
dictionaries = {'traditional': {}, 'simplified': {}}
trees = {'traditional': Tree(), 'simplified': Tree()}
lines = gzip.open(os.path.join(os.path.dirname(__file__), 'cedict.txt.gz'), mode='rt', encoding='utf-8')
exp = re.compile('^([^ ]+) ([^ ]+) \\[(.*)\\] /(.+)/')
parsed_lines = (exp.match(line).groups() for line in lines if line[0] != '#')
for (traditional, simplified, pinyin, meaning) in parsed_lines:
meaning = meaning.split('/')
dictionaries['traditional'][traditional] = meaning
dictionaries['simplified'][simplified] = meaning
_add_to_tree(trees['traditional'], traditional, meaning)
_add_to_tree(trees['simplified'], simplified, meaning) # depends on [control=['for'], data=[]] |
def _to_df(self, query, conn, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None):
"""
Internal convert-to-DataFrame convenience wrapper.
"""
return pd.io.sql.read_sql(str(query), conn, index_col=index_col,
coerce_float=coerce_float, params=params,
parse_dates=parse_dates, columns=columns) | def function[_to_df, parameter[self, query, conn, index_col, coerce_float, params, parse_dates, columns]]:
constant[
Internal convert-to-DataFrame convenience wrapper.
]
return[call[name[pd].io.sql.read_sql, parameter[call[name[str], parameter[name[query]]], name[conn]]]] | keyword[def] identifier[_to_df] ( identifier[self] , identifier[query] , identifier[conn] , identifier[index_col] = keyword[None] , identifier[coerce_float] = keyword[True] , identifier[params] = keyword[None] ,
identifier[parse_dates] = keyword[None] , identifier[columns] = keyword[None] ):
literal[string]
keyword[return] identifier[pd] . identifier[io] . identifier[sql] . identifier[read_sql] ( identifier[str] ( identifier[query] ), identifier[conn] , identifier[index_col] = identifier[index_col] ,
identifier[coerce_float] = identifier[coerce_float] , identifier[params] = identifier[params] ,
identifier[parse_dates] = identifier[parse_dates] , identifier[columns] = identifier[columns] ) | def _to_df(self, query, conn, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None):
"""
Internal convert-to-DataFrame convenience wrapper.
"""
return pd.io.sql.read_sql(str(query), conn, index_col=index_col, coerce_float=coerce_float, params=params, parse_dates=parse_dates, columns=columns) |
def export_datasources(print_stdout, datasource_file,
                       back_references, include_defaults):
    """Export datasources to YAML"""
    exported = dict_import_export.export_to_dict(
        session=db.session,
        recursive=True,
        back_references=back_references,
        include_defaults=include_defaults)
    # Dump to stdout when explicitly requested, or when no target file
    # was given (so the export always goes somewhere).
    dump_to_stdout = print_stdout or not datasource_file
    if dump_to_stdout:
        yaml.safe_dump(exported, stdout, default_flow_style=False)
    if not datasource_file:
        return
    logging.info('Exporting datasources to %s', datasource_file)
    with open(datasource_file, 'w') as outfile:
        yaml.safe_dump(exported, outfile, default_flow_style=False)
constant[Export datasources to YAML]
variable[data] assign[=] call[name[dict_import_export].export_to_dict, parameter[]]
if <ast.BoolOp object at 0x7da1b1eeb670> begin[:]
call[name[yaml].safe_dump, parameter[name[data], name[stdout]]]
if name[datasource_file] begin[:]
call[name[logging].info, parameter[constant[Exporting datasources to %s], name[datasource_file]]]
with call[name[open], parameter[name[datasource_file], constant[w]]] begin[:]
call[name[yaml].safe_dump, parameter[name[data], name[data_stream]]] | keyword[def] identifier[export_datasources] ( identifier[print_stdout] , identifier[datasource_file] ,
identifier[back_references] , identifier[include_defaults] ):
literal[string]
identifier[data] = identifier[dict_import_export] . identifier[export_to_dict] (
identifier[session] = identifier[db] . identifier[session] ,
identifier[recursive] = keyword[True] ,
identifier[back_references] = identifier[back_references] ,
identifier[include_defaults] = identifier[include_defaults] )
keyword[if] identifier[print_stdout] keyword[or] keyword[not] identifier[datasource_file] :
identifier[yaml] . identifier[safe_dump] ( identifier[data] , identifier[stdout] , identifier[default_flow_style] = keyword[False] )
keyword[if] identifier[datasource_file] :
identifier[logging] . identifier[info] ( literal[string] , identifier[datasource_file] )
keyword[with] identifier[open] ( identifier[datasource_file] , literal[string] ) keyword[as] identifier[data_stream] :
identifier[yaml] . identifier[safe_dump] ( identifier[data] , identifier[data_stream] , identifier[default_flow_style] = keyword[False] ) | def export_datasources(print_stdout, datasource_file, back_references, include_defaults):
"""Export datasources to YAML"""
data = dict_import_export.export_to_dict(session=db.session, recursive=True, back_references=back_references, include_defaults=include_defaults)
if print_stdout or not datasource_file:
yaml.safe_dump(data, stdout, default_flow_style=False) # depends on [control=['if'], data=[]]
if datasource_file:
logging.info('Exporting datasources to %s', datasource_file)
with open(datasource_file, 'w') as data_stream:
yaml.safe_dump(data, data_stream, default_flow_style=False) # depends on [control=['with'], data=['data_stream']] # depends on [control=['if'], data=[]] |
def get_release_task_attachments(self, project, release_id, environment_id, attempt_id, plan_id, type):
    """GetReleaseTaskAttachments.
    [Preview API]
    :param str project: Project ID or project name
    :param int release_id:
    :param int environment_id:
    :param int attempt_id:
    :param str plan_id:
    :param str type:
    :rtype: [ReleaseTaskAttachment]
    """
    # Table-driven build of the route values; insertion order matches
    # the original explicit if-chain.
    candidates = [
        ('project', 'project', project, 'str'),
        ('releaseId', 'release_id', release_id, 'int'),
        ('environmentId', 'environment_id', environment_id, 'int'),
        ('attemptId', 'attempt_id', attempt_id, 'int'),
        ('planId', 'plan_id', plan_id, 'str'),
        ('type', 'type', type, 'str'),
    ]
    route_values = {}
    for route_key, arg_name, value, type_name in candidates:
        if value is not None:
            route_values[route_key] = self._serialize.url(arg_name, value, type_name)
    response = self._send(http_method='GET',
                          location_id='a4d06688-0dfa-4895-82a5-f43ec9452306',
                          version='5.0-preview.1',
                          route_values=route_values)
    return self._deserialize('[ReleaseTaskAttachment]', self._unwrap_collection(response))
constant[GetReleaseTaskAttachments.
[Preview API]
:param str project: Project ID or project name
:param int release_id:
:param int environment_id:
:param int attempt_id:
:param str plan_id:
:param str type:
:rtype: [ReleaseTaskAttachment]
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[project] is_not constant[None]] begin[:]
call[name[route_values]][constant[project]] assign[=] call[name[self]._serialize.url, parameter[constant[project], name[project], constant[str]]]
if compare[name[release_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[releaseId]] assign[=] call[name[self]._serialize.url, parameter[constant[release_id], name[release_id], constant[int]]]
if compare[name[environment_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[environmentId]] assign[=] call[name[self]._serialize.url, parameter[constant[environment_id], name[environment_id], constant[int]]]
if compare[name[attempt_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[attemptId]] assign[=] call[name[self]._serialize.url, parameter[constant[attempt_id], name[attempt_id], constant[int]]]
if compare[name[plan_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[planId]] assign[=] call[name[self]._serialize.url, parameter[constant[plan_id], name[plan_id], constant[str]]]
if compare[name[type] is_not constant[None]] begin[:]
call[name[route_values]][constant[type]] assign[=] call[name[self]._serialize.url, parameter[constant[type], name[type], constant[str]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[[ReleaseTaskAttachment]], call[name[self]._unwrap_collection, parameter[name[response]]]]]] | keyword[def] identifier[get_release_task_attachments] ( identifier[self] , identifier[project] , identifier[release_id] , identifier[environment_id] , identifier[attempt_id] , identifier[plan_id] , identifier[type] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project] , literal[string] )
keyword[if] identifier[release_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[release_id] , literal[string] )
keyword[if] identifier[environment_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[environment_id] , literal[string] )
keyword[if] identifier[attempt_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[attempt_id] , literal[string] )
keyword[if] identifier[plan_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[plan_id] , literal[string] )
keyword[if] identifier[type] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[type] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[self] . identifier[_unwrap_collection] ( identifier[response] )) | def get_release_task_attachments(self, project, release_id, environment_id, attempt_id, plan_id, type):
"""GetReleaseTaskAttachments.
[Preview API]
:param str project: Project ID or project name
:param int release_id:
:param int environment_id:
:param int attempt_id:
:param str plan_id:
:param str type:
:rtype: [ReleaseTaskAttachment]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str') # depends on [control=['if'], data=['project']]
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') # depends on [control=['if'], data=['release_id']]
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int') # depends on [control=['if'], data=['environment_id']]
if attempt_id is not None:
route_values['attemptId'] = self._serialize.url('attempt_id', attempt_id, 'int') # depends on [control=['if'], data=['attempt_id']]
if plan_id is not None:
route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') # depends on [control=['if'], data=['plan_id']]
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str') # depends on [control=['if'], data=['type']]
response = self._send(http_method='GET', location_id='a4d06688-0dfa-4895-82a5-f43ec9452306', version='5.0-preview.1', route_values=route_values)
return self._deserialize('[ReleaseTaskAttachment]', self._unwrap_collection(response)) |
def _get_size(self) -> Tuple[int, int]:
    """Return this Image's dimensions as a ``(width, height)`` tuple.

    Returns:
        Tuple[int, int]: The (width, height) of this Image
    """
    # Allocate two C int out-parameters for the foreign call.
    width_ptr = ffi.new("int *")
    height_ptr = ffi.new("int *")
    lib.TCOD_image_get_size(self.image_c, width_ptr, height_ptr)
    return width_ptr[0], height_ptr[0]
constant[Return the (width, height) for this Image.
Returns:
Tuple[int, int]: The (width, height) of this Image
]
variable[w] assign[=] call[name[ffi].new, parameter[constant[int *]]]
variable[h] assign[=] call[name[ffi].new, parameter[constant[int *]]]
call[name[lib].TCOD_image_get_size, parameter[name[self].image_c, name[w], name[h]]]
return[tuple[[<ast.Subscript object at 0x7da1b23446a0>, <ast.Subscript object at 0x7da1b2346290>]]] | keyword[def] identifier[_get_size] ( identifier[self] )-> identifier[Tuple] [ identifier[int] , identifier[int] ]:
literal[string]
identifier[w] = identifier[ffi] . identifier[new] ( literal[string] )
identifier[h] = identifier[ffi] . identifier[new] ( literal[string] )
identifier[lib] . identifier[TCOD_image_get_size] ( identifier[self] . identifier[image_c] , identifier[w] , identifier[h] )
keyword[return] identifier[w] [ literal[int] ], identifier[h] [ literal[int] ] | def _get_size(self) -> Tuple[int, int]:
"""Return the (width, height) for this Image.
Returns:
Tuple[int, int]: The (width, height) of this Image
"""
w = ffi.new('int *')
h = ffi.new('int *')
lib.TCOD_image_get_size(self.image_c, w, h)
return (w[0], h[0]) |
def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"):
    """
    Starts a packet capture.

    :param port_number: allocated port number
    :param output_file: PCAP destination file for the capture
    :param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
    """
    port_exists = any(port_number == port["port_number"] for port in self._ports_mapping)
    if not port_exists:
        raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(name=self.name,
                                                                                    port_number=port_number))
    if port_number not in self._nios:
        raise NodeError("Port {} is not connected".format(port_number))
    nio = self._nios[port_number]
    if nio.capturing:
        raise NodeError("Packet capture is already activated on port {port_number}".format(port_number=port_number))
    nio.startPacketCapture(output_file)
    # uBridge bridges for this node are named "<node id>-<port number>".
    bridge = "{}-{}".format(self._id, port_number)
    yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=bridge,
                                                                                      output_file=output_file))
    log.info("Cloud '{name}' [{id}]: starting packet capture on port {port_number}".format(name=self.name,
                                                                                           id=self.id,
                                                                                           port_number=port_number))
constant[
Starts a packet capture.
:param port_number: allocated port number
:param output_file: PCAP destination file for the capture
:param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
]
if <ast.UnaryOp object at 0x7da204621ae0> begin[:]
<ast.Raise object at 0x7da1b17abfd0>
if compare[name[port_number] <ast.NotIn object at 0x7da2590d7190> name[self]._nios] begin[:]
<ast.Raise object at 0x7da2047ea230>
variable[nio] assign[=] call[name[self]._nios][name[port_number]]
if name[nio].capturing begin[:]
<ast.Raise object at 0x7da2047e9fc0>
call[name[nio].startPacketCapture, parameter[name[output_file]]]
variable[bridge_name] assign[=] call[constant[{}-{}].format, parameter[name[self]._id, name[port_number]]]
<ast.YieldFrom object at 0x7da2047ebbb0>
call[name[log].info, parameter[call[constant[Cloud '{name}' [{id}]: starting packet capture on port {port_number}].format, parameter[]]]] | keyword[def] identifier[start_capture] ( identifier[self] , identifier[port_number] , identifier[output_file] , identifier[data_link_type] = literal[string] ):
literal[string]
keyword[if] keyword[not] [ identifier[port] [ literal[string] ] keyword[for] identifier[port] keyword[in] identifier[self] . identifier[_ports_mapping] keyword[if] identifier[port_number] == identifier[port] [ literal[string] ]]:
keyword[raise] identifier[NodeError] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[name] ,
identifier[port_number] = identifier[port_number] ))
keyword[if] identifier[port_number] keyword[not] keyword[in] identifier[self] . identifier[_nios] :
keyword[raise] identifier[NodeError] ( literal[string] . identifier[format] ( identifier[port_number] ))
identifier[nio] = identifier[self] . identifier[_nios] [ identifier[port_number] ]
keyword[if] identifier[nio] . identifier[capturing] :
keyword[raise] identifier[NodeError] ( literal[string] . identifier[format] ( identifier[port_number] = identifier[port_number] ))
identifier[nio] . identifier[startPacketCapture] ( identifier[output_file] )
identifier[bridge_name] = literal[string] . identifier[format] ( identifier[self] . identifier[_id] , identifier[port_number] )
keyword[yield] keyword[from] identifier[self] . identifier[_ubridge_send] ( literal[string] . identifier[format] ( identifier[name] = identifier[bridge_name] ,
identifier[output_file] = identifier[output_file] ))
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[name] ,
identifier[id] = identifier[self] . identifier[id] ,
identifier[port_number] = identifier[port_number] )) | def start_capture(self, port_number, output_file, data_link_type='DLT_EN10MB'):
"""
Starts a packet capture.
:param port_number: allocated port number
:param output_file: PCAP destination file for the capture
:param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
"""
if not [port['port_number'] for port in self._ports_mapping if port_number == port['port_number']]:
raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(name=self.name, port_number=port_number)) # depends on [control=['if'], data=[]]
if port_number not in self._nios:
raise NodeError('Port {} is not connected'.format(port_number)) # depends on [control=['if'], data=['port_number']]
nio = self._nios[port_number]
if nio.capturing:
raise NodeError('Packet capture is already activated on port {port_number}'.format(port_number=port_number)) # depends on [control=['if'], data=[]]
nio.startPacketCapture(output_file)
bridge_name = '{}-{}'.format(self._id, port_number)
yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=bridge_name, output_file=output_file))
log.info("Cloud '{name}' [{id}]: starting packet capture on port {port_number}".format(name=self.name, id=self.id, port_number=port_number)) |
def attachStatement(self, CorpNum, MgtKeyType, MgtKey, ItemCode, StmtMgtKey, UserID=None):
    """ Attach an e-statement to a tax invoice.

    args
        CorpNum : Popbill member business registration number
        MgtKeyType : tax invoice type, SELL (sales), BUY (purchase),
            TRUSTEE (consignment)
        MgtKey : tax invoice document management key
        ItemCode : statement type code, 121-statement, 122-bill,
            123-estimate, 124-purchase order, 125-deposit slip, 126-receipt
        StmtMgtKey : e-statement document management key
        UserID : Popbill member user id (optional)
    return
        result, consisting of code and message
    raise
        PopbillException
    """
    # Validate the key type and key locally before hitting the API.
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
    # PEP 8: compare to None with `is`, not `==`.
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")

    uri = '/Taxinvoice/' + MgtKeyType + '/' + MgtKey + '/AttachStmt'
    postData = self._stringtify({"ItemCode": ItemCode, "MgtKey": StmtMgtKey})

    return self._httppost(uri, postData, CorpNum, UserID)
constant[ 전자명세서 첨부
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서 유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
MgtKey : 세금계산서 문서관리번호
StmtCode : 명세서 종류코드, 121-명세서, 122-청구서, 123-견적서, 124-발주서 125-입금표, 126-영수증
StmtMgtKey : 전자명세서 문서관리번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
]
if compare[name[MgtKeyType] <ast.NotIn object at 0x7da2590d7190> name[self].__MgtKeyTypes] begin[:]
<ast.Raise object at 0x7da1b1020cd0>
if <ast.BoolOp object at 0x7da1b1023e80> begin[:]
<ast.Raise object at 0x7da1b1023b80>
variable[uri] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[/Taxinvoice/] + name[MgtKeyType]] + constant[/]] + name[MgtKey]] + constant[/AttachStmt]]
variable[postData] assign[=] call[name[self]._stringtify, parameter[dictionary[[<ast.Constant object at 0x7da20c6a9a20>, <ast.Constant object at 0x7da20c6a9cc0>], [<ast.Name object at 0x7da20c6ab1f0>, <ast.Name object at 0x7da20c6ab9d0>]]]]
return[call[name[self]._httppost, parameter[name[uri], name[postData], name[CorpNum], name[UserID]]]] | keyword[def] identifier[attachStatement] ( identifier[self] , identifier[CorpNum] , identifier[MgtKeyType] , identifier[MgtKey] , identifier[ItemCode] , identifier[StmtMgtKey] , identifier[UserID] = keyword[None] ):
literal[string]
keyword[if] identifier[MgtKeyType] keyword[not] keyword[in] identifier[self] . identifier[__MgtKeyTypes] :
keyword[raise] identifier[PopbillException] (- literal[int] , literal[string] )
keyword[if] identifier[MgtKey] == keyword[None] keyword[or] identifier[MgtKey] == literal[string] :
keyword[raise] identifier[PopbillException] (- literal[int] , literal[string] )
identifier[uri] = literal[string] + identifier[MgtKeyType] + literal[string] + identifier[MgtKey] + literal[string]
identifier[postData] = identifier[self] . identifier[_stringtify] ({ literal[string] : identifier[ItemCode] , literal[string] : identifier[StmtMgtKey] })
keyword[return] identifier[self] . identifier[_httppost] ( identifier[uri] , identifier[postData] , identifier[CorpNum] , identifier[UserID] ) | def attachStatement(self, CorpNum, MgtKeyType, MgtKey, ItemCode, StmtMgtKey, UserID=None):
""" 전자명세서 첨부
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서 유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
MgtKey : 세금계산서 문서관리번호
StmtCode : 명세서 종류코드, 121-명세서, 122-청구서, 123-견적서, 124-발주서 125-입금표, 126-영수증
StmtMgtKey : 전자명세서 문서관리번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, '관리번호 형태가 올바르지 않습니다.') # depends on [control=['if'], data=[]]
if MgtKey == None or MgtKey == '':
raise PopbillException(-99999999, '관리번호가 입력되지 않았습니다.') # depends on [control=['if'], data=[]]
uri = '/Taxinvoice/' + MgtKeyType + '/' + MgtKey + '/AttachStmt'
postData = self._stringtify({'ItemCode': ItemCode, 'MgtKey': StmtMgtKey})
return self._httppost(uri, postData, CorpNum, UserID) |
def classify_elements(self,
                      file,
                      file_content_type=None,
                      model=None,
                      **kwargs):
    """
    Classify the elements of a document.

    Analyzes the structural and semantic elements of a document.

    :param file file: The document to classify.
    :param str file_content_type: The content type of file.
    :param str model: The analysis model to be used by the service. For the **Element
    classification** and **Compare two documents** methods, the default is
    `contracts`. For the **Extract tables** method, the default is `tables`. These
    defaults apply to the standalone methods as well as to the methods' use in
    batch-processing requests.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    if file is None:
        raise ValueError('file must be provided')

    # Caller-supplied headers first, then the SDK analytics headers on top.
    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs['headers'])
    headers.update(get_sdk_headers('compare-comply', 'V1', 'classify_elements'))

    params = {'version': self.version, 'model': model}

    # Multipart upload; fall back to a generic binary content type.
    content_type = file_content_type or 'application/octet-stream'
    form_data = {'file': (None, file, content_type)}

    return self.request(
        method='POST',
        url='/v1/element_classification',
        headers=headers,
        params=params,
        files=form_data,
        accept_json=True)
constant[
Classify the elements of a document.
Analyzes the structural and semantic elements of a document.
:param file file: The document to classify.
:param str file_content_type: The content type of file.
:param str model: The analysis model to be used by the service. For the **Element
classification** and **Compare two documents** methods, the default is
`contracts`. For the **Extract tables** method, the default is `tables`. These
defaults apply to the standalone methods as well as to the methods' use in
batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
]
if compare[name[file] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1b45270>
variable[headers] assign[=] dictionary[[], []]
if compare[constant[headers] in name[kwargs]] begin[:]
call[name[headers].update, parameter[call[name[kwargs].get, parameter[constant[headers]]]]]
variable[sdk_headers] assign[=] call[name[get_sdk_headers], parameter[constant[compare-comply], constant[V1], constant[classify_elements]]]
call[name[headers].update, parameter[name[sdk_headers]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da2049600d0>, <ast.Constant object at 0x7da204960cd0>], [<ast.Attribute object at 0x7da2044c1690>, <ast.Name object at 0x7da2044c0340>]]
variable[form_data] assign[=] dictionary[[], []]
call[name[form_data]][constant[file]] assign[=] tuple[[<ast.Constant object at 0x7da2044c3820>, <ast.Name object at 0x7da2044c0df0>, <ast.BoolOp object at 0x7da2044c2170>]]
variable[url] assign[=] constant[/v1/element_classification]
variable[response] assign[=] call[name[self].request, parameter[]]
return[name[response]] | keyword[def] identifier[classify_elements] ( identifier[self] ,
identifier[file] ,
identifier[file_content_type] = keyword[None] ,
identifier[model] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
keyword[if] identifier[file] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[headers] ={}
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[headers] . identifier[update] ( identifier[kwargs] . identifier[get] ( literal[string] ))
identifier[sdk_headers] = identifier[get_sdk_headers] ( literal[string] , literal[string] ,
literal[string] )
identifier[headers] . identifier[update] ( identifier[sdk_headers] )
identifier[params] ={ literal[string] : identifier[self] . identifier[version] , literal[string] : identifier[model] }
identifier[form_data] ={}
identifier[form_data] [ literal[string] ]=( keyword[None] , identifier[file] , identifier[file_content_type] keyword[or]
literal[string] )
identifier[url] = literal[string]
identifier[response] = identifier[self] . identifier[request] (
identifier[method] = literal[string] ,
identifier[url] = identifier[url] ,
identifier[headers] = identifier[headers] ,
identifier[params] = identifier[params] ,
identifier[files] = identifier[form_data] ,
identifier[accept_json] = keyword[True] )
keyword[return] identifier[response] | def classify_elements(self, file, file_content_type=None, model=None, **kwargs):
"""
Classify the elements of a document.
Analyzes the structural and semantic elements of a document.
:param file file: The document to classify.
:param str file_content_type: The content type of file.
:param str model: The analysis model to be used by the service. For the **Element
classification** and **Compare two documents** methods, the default is
`contracts`. For the **Extract tables** method, the default is `tables`. These
defaults apply to the standalone methods as well as to the methods' use in
batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if file is None:
raise ValueError('file must be provided') # depends on [control=['if'], data=[]]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers')) # depends on [control=['if'], data=['kwargs']]
sdk_headers = get_sdk_headers('compare-comply', 'V1', 'classify_elements')
headers.update(sdk_headers)
params = {'version': self.version, 'model': model}
form_data = {}
form_data['file'] = (None, file, file_content_type or 'application/octet-stream')
url = '/v1/element_classification'
response = self.request(method='POST', url=url, headers=headers, params=params, files=form_data, accept_json=True)
return response |
def login(self):
    """
    Authenticate against the API and install the returned token in the
    REST session's authorization header.

    Uses token-based credentials when a token is set, otherwise falls
    back to username/password.

    :return:
    """
    uri = 'API/Auth/Login'
    if self._token:
        credentials = {'token': self._token, 'domain': self._domain}
    else:
        credentials = {'username': self._username, 'password': self._password, 'domain': self._domain}
    token = self.__rest_client.request_put(uri, credentials)
    # The server returns the token wrapped in quotes; strip them.
    self.__rest_client.session.headers.update(authorization="Basic {0}".format(token.replace('"', '')))
constant[
Login
:return:
]
variable[uri] assign[=] constant[API/Auth/Login]
if name[self]._token begin[:]
variable[json_data] assign[=] dictionary[[<ast.Constant object at 0x7da20cabfd00>, <ast.Constant object at 0x7da20cabc880>], [<ast.Attribute object at 0x7da20cabc4f0>, <ast.Attribute object at 0x7da20cabf490>]]
variable[result] assign[=] call[name[self].__rest_client.request_put, parameter[name[uri], name[json_data]]]
call[name[self].__rest_client.session.headers.update, parameter[]] | keyword[def] identifier[login] ( identifier[self] ):
literal[string]
identifier[uri] = literal[string]
keyword[if] identifier[self] . identifier[_token] :
identifier[json_data] ={ literal[string] : identifier[self] . identifier[_token] , literal[string] : identifier[self] . identifier[_domain] }
keyword[else] :
identifier[json_data] ={ literal[string] : identifier[self] . identifier[_username] , literal[string] : identifier[self] . identifier[_password] , literal[string] : identifier[self] . identifier[_domain] }
identifier[result] = identifier[self] . identifier[__rest_client] . identifier[request_put] ( identifier[uri] , identifier[json_data] )
identifier[self] . identifier[__rest_client] . identifier[session] . identifier[headers] . identifier[update] ( identifier[authorization] = literal[string] . identifier[format] ( identifier[result] . identifier[replace] ( literal[string] , literal[string] ))) | def login(self):
"""
Login
:return:
"""
uri = 'API/Auth/Login'
if self._token:
json_data = {'token': self._token, 'domain': self._domain} # depends on [control=['if'], data=[]]
else:
json_data = {'username': self._username, 'password': self._password, 'domain': self._domain}
result = self.__rest_client.request_put(uri, json_data)
self.__rest_client.session.headers.update(authorization='Basic {0}'.format(result.replace('"', ''))) |
def _check_model(obj, models=None):
"""Checks object if it's a peewee model and unique."""
return isinstance(obj, type) and issubclass(obj, pw.Model) and hasattr(obj, '_meta') | def function[_check_model, parameter[obj, models]]:
constant[Checks object if it's a peewee model and unique.]
return[<ast.BoolOp object at 0x7da20e960bb0>] | keyword[def] identifier[_check_model] ( identifier[obj] , identifier[models] = keyword[None] ):
literal[string]
keyword[return] identifier[isinstance] ( identifier[obj] , identifier[type] ) keyword[and] identifier[issubclass] ( identifier[obj] , identifier[pw] . identifier[Model] ) keyword[and] identifier[hasattr] ( identifier[obj] , literal[string] ) | def _check_model(obj, models=None):
"""Checks object if it's a peewee model and unique."""
return isinstance(obj, type) and issubclass(obj, pw.Model) and hasattr(obj, '_meta') |
def get_session_value(self, name, default=None):
    """Get value from session"""
    # Session keys are namespaced per app/model: "list_<app>_<model>_<name>".
    app = self.kwargs.get('app')
    model = self.kwargs.get('model')
    session_key = f'list_{app}_{model}_{name}'
    return self.request.session.get(session_key, default)
constant[Get value from session]
variable[session_name] assign[=] call[constant[list_{}_{}_{}].format, parameter[call[name[self].kwargs.get, parameter[constant[app]]], call[name[self].kwargs.get, parameter[constant[model]]], name[name]]]
return[call[name[self].request.session.get, parameter[name[session_name], name[default]]]] | keyword[def] identifier[get_session_value] ( identifier[self] , identifier[name] , identifier[default] = keyword[None] ):
literal[string]
identifier[session_name] = literal[string] . identifier[format] ( identifier[self] . identifier[kwargs] . identifier[get] ( literal[string] ), identifier[self] . identifier[kwargs] . identifier[get] ( literal[string] ), identifier[name] )
keyword[return] identifier[self] . identifier[request] . identifier[session] . identifier[get] ( identifier[session_name] , identifier[default] ) | def get_session_value(self, name, default=None):
"""Get value from session"""
session_name = 'list_{}_{}_{}'.format(self.kwargs.get('app'), self.kwargs.get('model'), name)
return self.request.session.get(session_name, default) |
def _include_pattern(self, pattern, anchor=True, prefix=None,
is_regex=False):
"""Select strings (presumably filenames) from 'self.files' that
match 'pattern', a Unix-style wildcard (glob) pattern.
Patterns are not quite the same as implemented by the 'fnmatch'
module: '*' and '?' match non-special characters, where "special"
is platform-dependent: slash on Unix; colon, slash, and backslash on
DOS/Windows; and colon on Mac OS.
If 'anchor' is true (the default), then the pattern match is more
stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
'anchor' is false, both of these will match.
If 'prefix' is supplied, then only filenames starting with 'prefix'
(itself a pattern) and ending with 'pattern', with anything in between
them, will match. 'anchor' is ignored in this case.
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
'pattern' is assumed to be either a string containing a regex or a
regex object -- no translation is done, the regex is just compiled
and used as-is.
Selected strings will be added to self.files.
Return True if files are found.
"""
# XXX docstring lying about what the special chars are?
found = False
pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
# delayed loading of allfiles list
if self.allfiles is None:
self.findall()
for name in self.allfiles:
if pattern_re.search(name):
self.files.add(name)
found = True
return found | def function[_include_pattern, parameter[self, pattern, anchor, prefix, is_regex]]:
constant[Select strings (presumably filenames) from 'self.files' that
match 'pattern', a Unix-style wildcard (glob) pattern.
Patterns are not quite the same as implemented by the 'fnmatch'
module: '*' and '?' match non-special characters, where "special"
is platform-dependent: slash on Unix; colon, slash, and backslash on
DOS/Windows; and colon on Mac OS.
If 'anchor' is true (the default), then the pattern match is more
stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
'anchor' is false, both of these will match.
If 'prefix' is supplied, then only filenames starting with 'prefix'
(itself a pattern) and ending with 'pattern', with anything in between
them, will match. 'anchor' is ignored in this case.
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
'pattern' is assumed to be either a string containing a regex or a
regex object -- no translation is done, the regex is just compiled
and used as-is.
Selected strings will be added to self.files.
Return True if files are found.
]
variable[found] assign[=] constant[False]
variable[pattern_re] assign[=] call[name[self]._translate_pattern, parameter[name[pattern], name[anchor], name[prefix], name[is_regex]]]
if compare[name[self].allfiles is constant[None]] begin[:]
call[name[self].findall, parameter[]]
for taget[name[name]] in starred[name[self].allfiles] begin[:]
if call[name[pattern_re].search, parameter[name[name]]] begin[:]
call[name[self].files.add, parameter[name[name]]]
variable[found] assign[=] constant[True]
return[name[found]] | keyword[def] identifier[_include_pattern] ( identifier[self] , identifier[pattern] , identifier[anchor] = keyword[True] , identifier[prefix] = keyword[None] ,
identifier[is_regex] = keyword[False] ):
literal[string]
identifier[found] = keyword[False]
identifier[pattern_re] = identifier[self] . identifier[_translate_pattern] ( identifier[pattern] , identifier[anchor] , identifier[prefix] , identifier[is_regex] )
keyword[if] identifier[self] . identifier[allfiles] keyword[is] keyword[None] :
identifier[self] . identifier[findall] ()
keyword[for] identifier[name] keyword[in] identifier[self] . identifier[allfiles] :
keyword[if] identifier[pattern_re] . identifier[search] ( identifier[name] ):
identifier[self] . identifier[files] . identifier[add] ( identifier[name] )
identifier[found] = keyword[True]
keyword[return] identifier[found] | def _include_pattern(self, pattern, anchor=True, prefix=None, is_regex=False):
"""Select strings (presumably filenames) from 'self.files' that
match 'pattern', a Unix-style wildcard (glob) pattern.
Patterns are not quite the same as implemented by the 'fnmatch'
module: '*' and '?' match non-special characters, where "special"
is platform-dependent: slash on Unix; colon, slash, and backslash on
DOS/Windows; and colon on Mac OS.
If 'anchor' is true (the default), then the pattern match is more
stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
'anchor' is false, both of these will match.
If 'prefix' is supplied, then only filenames starting with 'prefix'
(itself a pattern) and ending with 'pattern', with anything in between
them, will match. 'anchor' is ignored in this case.
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
'pattern' is assumed to be either a string containing a regex or a
regex object -- no translation is done, the regex is just compiled
and used as-is.
Selected strings will be added to self.files.
Return True if files are found.
"""
# XXX docstring lying about what the special chars are?
found = False
pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
# delayed loading of allfiles list
if self.allfiles is None:
self.findall() # depends on [control=['if'], data=[]]
for name in self.allfiles:
if pattern_re.search(name):
self.files.add(name)
found = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']]
return found |
def _zero_pad_gaps(tr, gaps, fill_gaps=True):
    """
    Replace padded parts of trace with zeros.

    Will cut around gaps, detrend, then pad the gaps with zeros.

    :type tr: :class:`obspy.core.stream.Trace`
    :param tr: A trace that has had the gaps padded
    :param gaps: List of dict of start-time and end-time as UTCDateTime objects
    :type gaps: list
    :type fill_gaps: bool
    :param fill_gaps: If True (default), detrend the gap-free segments and
        re-merge with the gaps filled by zeros, padding the trace ends back
        to the original start/end times; if False, return the trace with the
        gap spans simply cut out.

    :return: :class:`obspy.core.stream.Trace`
    """
    # Remember the original extent so end-padding can restore it later.
    start_in, end_in = (tr.stats.starttime, tr.stats.endtime)
    for gap in gaps:
        # Rebuild the trace from the data on either side of this gap,
        # rebinding `tr` so subsequent gaps are cut from the reduced trace.
        stream = Stream()
        if gap['starttime'] > tr.stats.starttime:
            stream += tr.slice(tr.stats.starttime, gap['starttime']).copy()
        if gap['endtime'] < tr.stats.endtime:
            # Note this can happen when gaps are calculated for a trace that
            # is longer than `length`, e.g. gaps are calculated pre-trim.
            stream += tr.slice(gap['endtime'], tr.stats.endtime).copy()
        tr = stream.merge()[0]
    if fill_gaps:
        # Split at the cuts, detrend each contiguous segment, then merge
        # with zeros in place of the removed gap samples.
        tr = tr.split()
        tr = tr.detrend()
        tr = tr.merge(fill_value=0)[0]
        # Need to check length - if a gap happened overlapping the end or start
        # of the trace this will be lost.
        if tr.stats.starttime != start_in:
            # pad with zeros
            tr.data = np.concatenate(
                [np.zeros(int(tr.stats.starttime - start_in)), tr.data])
            tr.stats.starttime = start_in
        if tr.stats.endtime != end_in:
            tr.data = np.concatenate(
                [tr.data, np.zeros(int(end_in - tr.stats.endtime))])
    return tr
constant[
Replace padded parts of trace with zeros.
Will cut around gaps, detrend, then pad the gaps with zeros.
:type tr: :class:`osbpy.core.stream.Trace`
:param tr: A trace that has had the gaps padded
:param gaps: List of dict of start-time and end-time as UTCDateTime objects
:type gaps: list
:return: :class:`obspy.core.stream.Trace`
]
<ast.Tuple object at 0x7da1b07898d0> assign[=] tuple[[<ast.Attribute object at 0x7da1b078a0b0>, <ast.Attribute object at 0x7da1b0789720>]]
for taget[name[gap]] in starred[name[gaps]] begin[:]
variable[stream] assign[=] call[name[Stream], parameter[]]
if compare[call[name[gap]][constant[starttime]] greater[>] name[tr].stats.starttime] begin[:]
<ast.AugAssign object at 0x7da1b078bf10>
if compare[call[name[gap]][constant[endtime]] less[<] name[tr].stats.endtime] begin[:]
<ast.AugAssign object at 0x7da1b078aa10>
variable[tr] assign[=] call[call[name[stream].merge, parameter[]]][constant[0]]
if name[fill_gaps] begin[:]
variable[tr] assign[=] call[name[tr].split, parameter[]]
variable[tr] assign[=] call[name[tr].detrend, parameter[]]
variable[tr] assign[=] call[call[name[tr].merge, parameter[]]][constant[0]]
if compare[name[tr].stats.starttime not_equal[!=] name[start_in]] begin[:]
name[tr].data assign[=] call[name[np].concatenate, parameter[list[[<ast.Call object at 0x7da1b078a6e0>, <ast.Attribute object at 0x7da1b078b580>]]]]
name[tr].stats.starttime assign[=] name[start_in]
if compare[name[tr].stats.endtime not_equal[!=] name[end_in]] begin[:]
name[tr].data assign[=] call[name[np].concatenate, parameter[list[[<ast.Attribute object at 0x7da1b078aec0>, <ast.Call object at 0x7da1b0788ac0>]]]]
return[name[tr]] | keyword[def] identifier[_zero_pad_gaps] ( identifier[tr] , identifier[gaps] , identifier[fill_gaps] = keyword[True] ):
literal[string]
identifier[start_in] , identifier[end_in] =( identifier[tr] . identifier[stats] . identifier[starttime] , identifier[tr] . identifier[stats] . identifier[endtime] )
keyword[for] identifier[gap] keyword[in] identifier[gaps] :
identifier[stream] = identifier[Stream] ()
keyword[if] identifier[gap] [ literal[string] ]> identifier[tr] . identifier[stats] . identifier[starttime] :
identifier[stream] += identifier[tr] . identifier[slice] ( identifier[tr] . identifier[stats] . identifier[starttime] , identifier[gap] [ literal[string] ]). identifier[copy] ()
keyword[if] identifier[gap] [ literal[string] ]< identifier[tr] . identifier[stats] . identifier[endtime] :
identifier[stream] += identifier[tr] . identifier[slice] ( identifier[gap] [ literal[string] ], identifier[tr] . identifier[stats] . identifier[endtime] ). identifier[copy] ()
identifier[tr] = identifier[stream] . identifier[merge] ()[ literal[int] ]
keyword[if] identifier[fill_gaps] :
identifier[tr] = identifier[tr] . identifier[split] ()
identifier[tr] = identifier[tr] . identifier[detrend] ()
identifier[tr] = identifier[tr] . identifier[merge] ( identifier[fill_value] = literal[int] )[ literal[int] ]
keyword[if] identifier[tr] . identifier[stats] . identifier[starttime] != identifier[start_in] :
identifier[tr] . identifier[data] = identifier[np] . identifier[concatenate] (
[ identifier[np] . identifier[zeros] ( identifier[int] ( identifier[tr] . identifier[stats] . identifier[starttime] - identifier[start_in] )), identifier[tr] . identifier[data] ])
identifier[tr] . identifier[stats] . identifier[starttime] = identifier[start_in]
keyword[if] identifier[tr] . identifier[stats] . identifier[endtime] != identifier[end_in] :
identifier[tr] . identifier[data] = identifier[np] . identifier[concatenate] (
[ identifier[tr] . identifier[data] , identifier[np] . identifier[zeros] ( identifier[int] ( identifier[end_in] - identifier[tr] . identifier[stats] . identifier[endtime] ))])
keyword[return] identifier[tr] | def _zero_pad_gaps(tr, gaps, fill_gaps=True):
"""
Replace padded parts of trace with zeros.
Will cut around gaps, detrend, then pad the gaps with zeros.
:type tr: :class:`osbpy.core.stream.Trace`
:param tr: A trace that has had the gaps padded
:param gaps: List of dict of start-time and end-time as UTCDateTime objects
:type gaps: list
:return: :class:`obspy.core.stream.Trace`
"""
(start_in, end_in) = (tr.stats.starttime, tr.stats.endtime)
for gap in gaps:
stream = Stream()
if gap['starttime'] > tr.stats.starttime:
stream += tr.slice(tr.stats.starttime, gap['starttime']).copy() # depends on [control=['if'], data=[]]
if gap['endtime'] < tr.stats.endtime:
# Note this can happen when gaps are calculated for a trace that
# is longer than `length`, e.g. gaps are calculated pre-trim.
stream += tr.slice(gap['endtime'], tr.stats.endtime).copy() # depends on [control=['if'], data=[]]
tr = stream.merge()[0] # depends on [control=['for'], data=['gap']]
if fill_gaps:
tr = tr.split()
tr = tr.detrend()
tr = tr.merge(fill_value=0)[0]
# Need to check length - if a gap happened overlapping the end or start
# of the trace this will be lost.
if tr.stats.starttime != start_in:
# pad with zeros
tr.data = np.concatenate([np.zeros(int(tr.stats.starttime - start_in)), tr.data])
tr.stats.starttime = start_in # depends on [control=['if'], data=['start_in']]
if tr.stats.endtime != end_in:
tr.data = np.concatenate([tr.data, np.zeros(int(end_in - tr.stats.endtime))]) # depends on [control=['if'], data=['end_in']] # depends on [control=['if'], data=[]]
return tr |
def _replace_placeholder(self, spider):
    """
    Return the (db_name, collection_name) pair with the ``%(spider)s``
    placeholder substituted by the spider's name.

    If your db_name or collection_name does not contain a placeholder,
    or is not based on the spider's name, you must override this
    function.

    :param spider: spider object; only ``spider.name`` is read
    :return: tuple of (formatted db_name, formatted collection_name)
    """
return self.db_name % {'spider': spider.name}, self.collection_name % {'spider': spider.name} | def function[_replace_placeholder, parameter[self, spider]]:
constant[
Returns replaced db_name and collection_name(base on spider's name).
if your db_name or collection_name does not have a placeholder or
your db_name or collection_name that not base on spider's name
you must override this function.
]
return[tuple[[<ast.BinOp object at 0x7da2049630d0>, <ast.BinOp object at 0x7da204962380>]]] | keyword[def] identifier[_replace_placeholder] ( identifier[self] , identifier[spider] ):
literal[string]
keyword[return] identifier[self] . identifier[db_name] %{ literal[string] : identifier[spider] . identifier[name] }, identifier[self] . identifier[collection_name] %{ literal[string] : identifier[spider] . identifier[name] } | def _replace_placeholder(self, spider):
"""
Returns replaced db_name and collection_name(base on spider's name).
if your db_name or collection_name does not have a placeholder or
your db_name or collection_name that not base on spider's name
you must override this function.
"""
return (self.db_name % {'spider': spider.name}, self.collection_name % {'spider': spider.name}) |
def abspath(self, path):
    """Returns the absolute path to the specified relative or user-relative
    path. For ssh paths, just return the full ssh path unchanged."""
    # ssh-style paths are already fully qualified for this class's
    # purposes, so they are passed through untouched.
    if self.is_ssh(path):
        return path
    else:
return os.path.abspath(path) | def function[abspath, parameter[self, path]]:
constant[Returns the absolute path to the specified relative or user-relative
path. For ssh paths, just return the full ssh path.]
if call[name[self].is_ssh, parameter[name[path]]] begin[:]
return[name[path]] | keyword[def] identifier[abspath] ( identifier[self] , identifier[path] ):
literal[string]
keyword[if] identifier[self] . identifier[is_ssh] ( identifier[path] ):
keyword[return] identifier[path]
keyword[else] :
keyword[return] identifier[os] . identifier[path] . identifier[abspath] ( identifier[path] ) | def abspath(self, path):
"""Returns the absolute path to the specified relative or user-relative
path. For ssh paths, just return the full ssh path."""
if self.is_ssh(path):
return path # depends on [control=['if'], data=[]]
else:
return os.path.abspath(path) |
def c_to_p(self, var_c, pro_ac=None):
    """
    Converts a c. SequenceVariant to a p. SequenceVariant on the specified protein accession
    Author: Rudy Rico
    :param SequenceVariant var_c: hgvsc tag
    :param str pro_ac: protein accession
    :rtype: hgvs.sequencevariant.SequenceVariant
    :raises HGVSInvalidVariantError: if var_c is not a cDNA (c.) variant
    """
    # Only cDNA (c.) variants can be translated to the protein level.
    if not (var_c.type == "c"):
        raise HGVSInvalidVariantError("Expected a cDNA (c.) variant; got " + str(var_c))
    if self._validator:
        self._validator.validate(var_c)
    reference_data = RefTranscriptData(self.hdp, var_c.ac, pro_ac)
    builder = altseqbuilder.AltSeqBuilder(var_c, reference_data)
    # TODO: handle case where you get 2+ alt sequences back;
    # currently get list of 1 element loop structure implemented
    # to handle this, but doesn't really do anything currently.
    all_alt_data = builder.build_altseq()
    var_ps = []
    for alt_data in all_alt_data:
        builder = altseq_to_hgvsp.AltSeqToHgvsp(reference_data, alt_data)
        var_p = builder.build_hgvsp()
        var_ps.append(var_p)
    # Only the first alternative sequence is used (see TODO above).
    var_p = var_ps[0]
    if self.add_gene_symbol:
        self._update_gene_symbol(var_p, var_c.gene)
return var_p | def function[c_to_p, parameter[self, var_c, pro_ac]]:
constant[
Converts a c. SequenceVariant to a p. SequenceVariant on the specified protein accession
Author: Rudy Rico
:param SequenceVariant var_c: hgvsc tag
:param str pro_ac: protein accession
:rtype: hgvs.sequencevariant.SequenceVariant
]
if <ast.UnaryOp object at 0x7da1b1d54070> begin[:]
<ast.Raise object at 0x7da1b1d56bc0>
if name[self]._validator begin[:]
call[name[self]._validator.validate, parameter[name[var_c]]]
variable[reference_data] assign[=] call[name[RefTranscriptData], parameter[name[self].hdp, name[var_c].ac, name[pro_ac]]]
variable[builder] assign[=] call[name[altseqbuilder].AltSeqBuilder, parameter[name[var_c], name[reference_data]]]
variable[all_alt_data] assign[=] call[name[builder].build_altseq, parameter[]]
variable[var_ps] assign[=] list[[]]
for taget[name[alt_data]] in starred[name[all_alt_data]] begin[:]
variable[builder] assign[=] call[name[altseq_to_hgvsp].AltSeqToHgvsp, parameter[name[reference_data], name[alt_data]]]
variable[var_p] assign[=] call[name[builder].build_hgvsp, parameter[]]
call[name[var_ps].append, parameter[name[var_p]]]
variable[var_p] assign[=] call[name[var_ps]][constant[0]]
if name[self].add_gene_symbol begin[:]
call[name[self]._update_gene_symbol, parameter[name[var_p], name[var_c].gene]]
return[name[var_p]] | keyword[def] identifier[c_to_p] ( identifier[self] , identifier[var_c] , identifier[pro_ac] = keyword[None] ):
literal[string]
keyword[if] keyword[not] ( identifier[var_c] . identifier[type] == literal[string] ):
keyword[raise] identifier[HGVSInvalidVariantError] ( literal[string] + identifier[str] ( identifier[var_c] ))
keyword[if] identifier[self] . identifier[_validator] :
identifier[self] . identifier[_validator] . identifier[validate] ( identifier[var_c] )
identifier[reference_data] = identifier[RefTranscriptData] ( identifier[self] . identifier[hdp] , identifier[var_c] . identifier[ac] , identifier[pro_ac] )
identifier[builder] = identifier[altseqbuilder] . identifier[AltSeqBuilder] ( identifier[var_c] , identifier[reference_data] )
identifier[all_alt_data] = identifier[builder] . identifier[build_altseq] ()
identifier[var_ps] =[]
keyword[for] identifier[alt_data] keyword[in] identifier[all_alt_data] :
identifier[builder] = identifier[altseq_to_hgvsp] . identifier[AltSeqToHgvsp] ( identifier[reference_data] , identifier[alt_data] )
identifier[var_p] = identifier[builder] . identifier[build_hgvsp] ()
identifier[var_ps] . identifier[append] ( identifier[var_p] )
identifier[var_p] = identifier[var_ps] [ literal[int] ]
keyword[if] identifier[self] . identifier[add_gene_symbol] :
identifier[self] . identifier[_update_gene_symbol] ( identifier[var_p] , identifier[var_c] . identifier[gene] )
keyword[return] identifier[var_p] | def c_to_p(self, var_c, pro_ac=None):
"""
Converts a c. SequenceVariant to a p. SequenceVariant on the specified protein accession
Author: Rudy Rico
:param SequenceVariant var_c: hgvsc tag
:param str pro_ac: protein accession
:rtype: hgvs.sequencevariant.SequenceVariant
"""
if not var_c.type == 'c':
raise HGVSInvalidVariantError('Expected a cDNA (c.) variant; got ' + str(var_c)) # depends on [control=['if'], data=[]]
if self._validator:
self._validator.validate(var_c) # depends on [control=['if'], data=[]]
reference_data = RefTranscriptData(self.hdp, var_c.ac, pro_ac)
builder = altseqbuilder.AltSeqBuilder(var_c, reference_data)
# TODO: handle case where you get 2+ alt sequences back;
# currently get list of 1 element loop structure implemented
# to handle this, but doesn't really do anything currently.
all_alt_data = builder.build_altseq()
var_ps = []
for alt_data in all_alt_data:
builder = altseq_to_hgvsp.AltSeqToHgvsp(reference_data, alt_data)
var_p = builder.build_hgvsp()
var_ps.append(var_p) # depends on [control=['for'], data=['alt_data']]
var_p = var_ps[0]
if self.add_gene_symbol:
self._update_gene_symbol(var_p, var_c.gene) # depends on [control=['if'], data=[]]
return var_p |
def create_platform(platform):
    '''
    .. versionadded:: 2019.2.0
    Create a new device platform
    platform
        String of device platform, e.g., ``junos``
    Returns False when the platform already exists or creation fails;
    otherwise returns a dict describing the created object.
    CLI Example:
    .. code-block:: bash
        salt myminion netbox.create_platform junos
    '''
    # Check for an existing platform by its slugified name.
    nb_platform = get_('dcim', 'platforms', slug=slugify(platform))
    if nb_platform:
        # Platform already exists; nothing to create.
        return False
    else:
        payload = {'name': platform, 'slug': slugify(platform)}
        plat = _add('dcim', 'platforms', payload)
        if plat:
            # Report the created object in a changes-dict style.
            return {'dcim': {'platforms': payload}}
        else:
return False | def function[create_platform, parameter[platform]]:
constant[
.. versionadded:: 2019.2.0
Create a new device platform
platform
String of device platform, e.g., ``junos``
CLI Example:
.. code-block:: bash
salt myminion netbox.create_platform junos
]
variable[nb_platform] assign[=] call[name[get_], parameter[constant[dcim], constant[platforms]]]
if name[nb_platform] begin[:]
return[constant[False]] | keyword[def] identifier[create_platform] ( identifier[platform] ):
literal[string]
identifier[nb_platform] = identifier[get_] ( literal[string] , literal[string] , identifier[slug] = identifier[slugify] ( identifier[platform] ))
keyword[if] identifier[nb_platform] :
keyword[return] keyword[False]
keyword[else] :
identifier[payload] ={ literal[string] : identifier[platform] , literal[string] : identifier[slugify] ( identifier[platform] )}
identifier[plat] = identifier[_add] ( literal[string] , literal[string] , identifier[payload] )
keyword[if] identifier[plat] :
keyword[return] { literal[string] :{ literal[string] : identifier[payload] }}
keyword[else] :
keyword[return] keyword[False] | def create_platform(platform):
"""
.. versionadded:: 2019.2.0
Create a new device platform
platform
String of device platform, e.g., ``junos``
CLI Example:
.. code-block:: bash
salt myminion netbox.create_platform junos
"""
nb_platform = get_('dcim', 'platforms', slug=slugify(platform))
if nb_platform:
return False # depends on [control=['if'], data=[]]
else:
payload = {'name': platform, 'slug': slugify(platform)}
plat = _add('dcim', 'platforms', payload)
if plat:
return {'dcim': {'platforms': payload}} # depends on [control=['if'], data=[]]
else:
return False |
def PCA_reduce(X, Q):
    """
    A helpful function for linearly reducing the dimensionality of the data X
    to Q.
    :param X: data array of size N (number of points) x D (dimensions)
    :param Q: Number of latent dimensions, Q < D
    :return: PCA projection array of size N x Q.
    """
    assert Q <= X.shape[1], 'Cannot have more latent dimensions than observed'
    # Eigendecompose the D x D covariance matrix; eigh returns eigenvalues
    # in ascending order, so the last Q eigenvectors span the top-Q
    # principal subspace.
    evals, evecs = np.linalg.eigh(np.cov(X.T))
    W = evecs[:, -Q:]
    # Center the data before projecting onto the principal directions.
return (X - X.mean(0)).dot(W) | def function[PCA_reduce, parameter[X, Q]]:
constant[
A helpful function for linearly reducing the dimensionality of the data X
to Q.
:param X: data array of size N (number of points) x D (dimensions)
:param Q: Number of latent dimensions, Q < D
:return: PCA projection array of size N x Q.
]
assert[compare[name[Q] less_or_equal[<=] call[name[X].shape][constant[1]]]]
<ast.Tuple object at 0x7da1b1cee3b0> assign[=] call[name[np].linalg.eigh, parameter[call[name[np].cov, parameter[name[X].T]]]]
variable[W] assign[=] call[name[evecs]][tuple[[<ast.Slice object at 0x7da1b1ceece0>, <ast.Slice object at 0x7da1b1cef910>]]]
return[call[binary_operation[name[X] - call[name[X].mean, parameter[constant[0]]]].dot, parameter[name[W]]]] | keyword[def] identifier[PCA_reduce] ( identifier[X] , identifier[Q] ):
literal[string]
keyword[assert] identifier[Q] <= identifier[X] . identifier[shape] [ literal[int] ], literal[string]
identifier[evals] , identifier[evecs] = identifier[np] . identifier[linalg] . identifier[eigh] ( identifier[np] . identifier[cov] ( identifier[X] . identifier[T] ))
identifier[W] = identifier[evecs] [:,- identifier[Q] :]
keyword[return] ( identifier[X] - identifier[X] . identifier[mean] ( literal[int] )). identifier[dot] ( identifier[W] ) | def PCA_reduce(X, Q):
"""
A helpful function for linearly reducing the dimensionality of the data X
to Q.
:param X: data array of size N (number of points) x D (dimensions)
:param Q: Number of latent dimensions, Q < D
:return: PCA projection array of size N x Q.
"""
assert Q <= X.shape[1], 'Cannot have more latent dimensions than observed'
(evals, evecs) = np.linalg.eigh(np.cov(X.T))
W = evecs[:, -Q:]
return (X - X.mean(0)).dot(W) |
def get_allowed_shape_ranges(spec):
    """
    For a given model specification, returns a dictionary with a shape range object for each input feature name.
    """
    shaper = NeuralNetworkShaper(spec, False)
    inputs = _get_input_names(spec)
    output = {}
    # Map each input feature name to its computed shape range.
    for input in inputs:
        output[input] = shaper.shape(input)
return output | def function[get_allowed_shape_ranges, parameter[spec]]:
constant[
For a given model specification, returns a dictionary with a shape range object for each input feature name.
]
variable[shaper] assign[=] call[name[NeuralNetworkShaper], parameter[name[spec], constant[False]]]
variable[inputs] assign[=] call[name[_get_input_names], parameter[name[spec]]]
variable[output] assign[=] dictionary[[], []]
for taget[name[input]] in starred[name[inputs]] begin[:]
call[name[output]][name[input]] assign[=] call[name[shaper].shape, parameter[name[input]]]
return[name[output]] | keyword[def] identifier[get_allowed_shape_ranges] ( identifier[spec] ):
literal[string]
identifier[shaper] = identifier[NeuralNetworkShaper] ( identifier[spec] , keyword[False] )
identifier[inputs] = identifier[_get_input_names] ( identifier[spec] )
identifier[output] ={}
keyword[for] identifier[input] keyword[in] identifier[inputs] :
identifier[output] [ identifier[input] ]= identifier[shaper] . identifier[shape] ( identifier[input] )
keyword[return] identifier[output] | def get_allowed_shape_ranges(spec):
"""
For a given model specification, returns a dictionary with a shape range object for each input feature name.
"""
shaper = NeuralNetworkShaper(spec, False)
inputs = _get_input_names(spec)
output = {}
for input in inputs:
output[input] = shaper.shape(input) # depends on [control=['for'], data=['input']]
return output |
def return_attr(self, attr, labels=None):
    """return the attributes for each channels.
    Parameters
    ----------
    attr : str
        attribute specified in Chan.attr.keys()
    labels : list of str, optional
        labels of the channels of interest; defaults to all channels
    Returns
    -------
    list
        one attribute value per requested channel, in the same order as
        ``labels``; None when a channel lacks the requested attribute
    """
    all_labels = self.return_label()
    if labels is None:
        labels = all_labels
    all_attr = []
    for one_label in labels:
        idx = all_labels.index(one_label)
        try:
            all_attr.append(self.chan[idx].attr[attr])
        except KeyError:
            # Unknown attribute: log the valid keys and append None so the
            # result list stays aligned with the requested labels.
            possible_attr = ', '.join(self.chan[idx].attr.keys())
            lg.debug('key "{}" not found, '.format(attr) +
                     'possible keys are {}'.format(possible_attr))
            all_attr.append(None)
return all_attr | def function[return_attr, parameter[self, attr, labels]]:
constant[return the attributes for each channels.
Parameters
----------
attr : str
attribute specified in Chan.attr.keys()
]
variable[all_labels] assign[=] call[name[self].return_label, parameter[]]
if compare[name[labels] is constant[None]] begin[:]
variable[labels] assign[=] name[all_labels]
variable[all_attr] assign[=] list[[]]
for taget[name[one_label]] in starred[name[labels]] begin[:]
variable[idx] assign[=] call[name[all_labels].index, parameter[name[one_label]]]
<ast.Try object at 0x7da1b0ec2fe0>
return[name[all_attr]] | keyword[def] identifier[return_attr] ( identifier[self] , identifier[attr] , identifier[labels] = keyword[None] ):
literal[string]
identifier[all_labels] = identifier[self] . identifier[return_label] ()
keyword[if] identifier[labels] keyword[is] keyword[None] :
identifier[labels] = identifier[all_labels]
identifier[all_attr] =[]
keyword[for] identifier[one_label] keyword[in] identifier[labels] :
identifier[idx] = identifier[all_labels] . identifier[index] ( identifier[one_label] )
keyword[try] :
identifier[all_attr] . identifier[append] ( identifier[self] . identifier[chan] [ identifier[idx] ]. identifier[attr] [ identifier[attr] ])
keyword[except] identifier[KeyError] :
identifier[possible_attr] = literal[string] . identifier[join] ( identifier[self] . identifier[chan] [ identifier[idx] ]. identifier[attr] . identifier[keys] ())
identifier[lg] . identifier[debug] ( literal[string] . identifier[format] ( identifier[attr] )+
literal[string] . identifier[format] ( identifier[possible_attr] ))
identifier[all_attr] . identifier[append] ( keyword[None] )
keyword[return] identifier[all_attr] | def return_attr(self, attr, labels=None):
"""return the attributes for each channels.
Parameters
----------
attr : str
attribute specified in Chan.attr.keys()
"""
all_labels = self.return_label()
if labels is None:
labels = all_labels # depends on [control=['if'], data=['labels']]
all_attr = []
for one_label in labels:
idx = all_labels.index(one_label)
try:
all_attr.append(self.chan[idx].attr[attr]) # depends on [control=['try'], data=[]]
except KeyError:
possible_attr = ', '.join(self.chan[idx].attr.keys())
lg.debug('key "{}" not found, '.format(attr) + 'possible keys are {}'.format(possible_attr))
all_attr.append(None) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['one_label']]
return all_attr |
def languages_column(self, obj):
    """
    Adds languages columns.
    Returns an HTML span listing the languages available for *obj*,
    space-separated.
    """
    languages = self.get_available_languages(obj)
    return '<span class="available-languages">{0}</span>'.format(
        " ".join(languages)
) | def function[languages_column, parameter[self, obj]]:
constant[
Adds languages columns.
]
variable[languages] assign[=] call[name[self].get_available_languages, parameter[name[obj]]]
return[call[constant[<span class="available-languages">{0}</span>].format, parameter[call[constant[ ].join, parameter[name[languages]]]]]] | keyword[def] identifier[languages_column] ( identifier[self] , identifier[obj] ):
literal[string]
identifier[languages] = identifier[self] . identifier[get_available_languages] ( identifier[obj] )
keyword[return] literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[languages] )
) | def languages_column(self, obj):
"""
Adds languages columns.
"""
languages = self.get_available_languages(obj)
return '<span class="available-languages">{0}</span>'.format(' '.join(languages)) |
def cancel_query(self):
    """
    Cancel all started queries that have not yet completed

    If a tracked job is still running, issues a cancel request and then
    polls (up to 12 attempts, 5 s apart) until the job reports complete
    or the polling budget is exhausted.
    """
    jobs = self.service.jobs()
    # Only attempt a cancel when there is a tracked job that has not
    # already finished.
    if (self.running_job_id and
            not self.poll_job_complete(self.running_job_id)):
        self.log.info('Attempting to cancel job : %s, %s', self.project_id,
                      self.running_job_id)
        if self.location:
            # Location-scoped jobs must include the location in the
            # cancel request.
            jobs.cancel(
                projectId=self.project_id,
                jobId=self.running_job_id,
                location=self.location).execute(num_retries=self.num_retries)
        else:
            jobs.cancel(
                projectId=self.project_id,
                jobId=self.running_job_id).execute(num_retries=self.num_retries)
    else:
        self.log.info('No running BigQuery jobs to cancel.')
        return
    # Wait for all the calls to cancel to finish
    max_polling_attempts = 12
    polling_attempts = 0
    job_complete = False
    while polling_attempts < max_polling_attempts and not job_complete:
        polling_attempts = polling_attempts + 1
        job_complete = self.poll_job_complete(self.running_job_id)
        if job_complete:
            self.log.info('Job successfully canceled: %s, %s',
                          self.project_id, self.running_job_id)
        elif polling_attempts == max_polling_attempts:
            # Out of polling budget; the cancel may still complete
            # server-side after we stop watching.
            self.log.info(
                "Stopping polling due to timeout. Job with id %s "
                "has not completed cancel and may or may not finish.",
                self.running_job_id)
        else:
            self.log.info('Waiting for canceled job with id %s to finish.',
                          self.running_job_id)
time.sleep(5) | def function[cancel_query, parameter[self]]:
constant[
Cancel all started queries that have not yet completed
]
variable[jobs] assign[=] call[name[self].service.jobs, parameter[]]
if <ast.BoolOp object at 0x7da1b056f340> begin[:]
call[name[self].log.info, parameter[constant[Attempting to cancel job : %s, %s], name[self].project_id, name[self].running_job_id]]
if name[self].location begin[:]
call[call[name[jobs].cancel, parameter[]].execute, parameter[]]
variable[max_polling_attempts] assign[=] constant[12]
variable[polling_attempts] assign[=] constant[0]
variable[job_complete] assign[=] constant[False]
while <ast.BoolOp object at 0x7da1b0558280> begin[:]
variable[polling_attempts] assign[=] binary_operation[name[polling_attempts] + constant[1]]
variable[job_complete] assign[=] call[name[self].poll_job_complete, parameter[name[self].running_job_id]]
if name[job_complete] begin[:]
call[name[self].log.info, parameter[constant[Job successfully canceled: %s, %s], name[self].project_id, name[self].running_job_id]] | keyword[def] identifier[cancel_query] ( identifier[self] ):
literal[string]
identifier[jobs] = identifier[self] . identifier[service] . identifier[jobs] ()
keyword[if] ( identifier[self] . identifier[running_job_id] keyword[and]
keyword[not] identifier[self] . identifier[poll_job_complete] ( identifier[self] . identifier[running_job_id] )):
identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[self] . identifier[project_id] ,
identifier[self] . identifier[running_job_id] )
keyword[if] identifier[self] . identifier[location] :
identifier[jobs] . identifier[cancel] (
identifier[projectId] = identifier[self] . identifier[project_id] ,
identifier[jobId] = identifier[self] . identifier[running_job_id] ,
identifier[location] = identifier[self] . identifier[location] ). identifier[execute] ( identifier[num_retries] = identifier[self] . identifier[num_retries] )
keyword[else] :
identifier[jobs] . identifier[cancel] (
identifier[projectId] = identifier[self] . identifier[project_id] ,
identifier[jobId] = identifier[self] . identifier[running_job_id] ). identifier[execute] ( identifier[num_retries] = identifier[self] . identifier[num_retries] )
keyword[else] :
identifier[self] . identifier[log] . identifier[info] ( literal[string] )
keyword[return]
identifier[max_polling_attempts] = literal[int]
identifier[polling_attempts] = literal[int]
identifier[job_complete] = keyword[False]
keyword[while] identifier[polling_attempts] < identifier[max_polling_attempts] keyword[and] keyword[not] identifier[job_complete] :
identifier[polling_attempts] = identifier[polling_attempts] + literal[int]
identifier[job_complete] = identifier[self] . identifier[poll_job_complete] ( identifier[self] . identifier[running_job_id] )
keyword[if] identifier[job_complete] :
identifier[self] . identifier[log] . identifier[info] ( literal[string] ,
identifier[self] . identifier[project_id] , identifier[self] . identifier[running_job_id] )
keyword[elif] identifier[polling_attempts] == identifier[max_polling_attempts] :
identifier[self] . identifier[log] . identifier[info] (
literal[string]
literal[string] ,
identifier[self] . identifier[running_job_id] )
keyword[else] :
identifier[self] . identifier[log] . identifier[info] ( literal[string] ,
identifier[self] . identifier[running_job_id] )
identifier[time] . identifier[sleep] ( literal[int] ) | def cancel_query(self):
"""
Cancel all started queries that have not yet completed
"""
jobs = self.service.jobs()
if self.running_job_id and (not self.poll_job_complete(self.running_job_id)):
self.log.info('Attempting to cancel job : %s, %s', self.project_id, self.running_job_id)
if self.location:
jobs.cancel(projectId=self.project_id, jobId=self.running_job_id, location=self.location).execute(num_retries=self.num_retries) # depends on [control=['if'], data=[]]
else:
jobs.cancel(projectId=self.project_id, jobId=self.running_job_id).execute(num_retries=self.num_retries) # depends on [control=['if'], data=[]]
else:
self.log.info('No running BigQuery jobs to cancel.')
return
# Wait for all the calls to cancel to finish
max_polling_attempts = 12
polling_attempts = 0
job_complete = False
while polling_attempts < max_polling_attempts and (not job_complete):
polling_attempts = polling_attempts + 1
job_complete = self.poll_job_complete(self.running_job_id)
if job_complete:
self.log.info('Job successfully canceled: %s, %s', self.project_id, self.running_job_id) # depends on [control=['if'], data=[]]
elif polling_attempts == max_polling_attempts:
self.log.info('Stopping polling due to timeout. Job with id %s has not completed cancel and may or may not finish.', self.running_job_id) # depends on [control=['if'], data=[]]
else:
self.log.info('Waiting for canceled job with id %s to finish.', self.running_job_id)
time.sleep(5) # depends on [control=['while'], data=[]] |
def random_processor(M, N, L, qubit_yield, num_evil=0):
    """A utility function that generates a random :math:`C_{M,N,L}` missing some
    percentage of its qubits.
    INPUTS:
        M,N,L: the chimera parameters
        qubit_yield: ratio (0 <= qubit_yield <= 1) of #{qubits}/(2*M*N*L)
        num_evil: number of broken in-cell couplers between working qubits
    OUTPUT:
        proc (:class:`processor`): a :class:`processor` instance with a random
            collection of qubits and couplers as specified
    """
    # Named replacement for a lambda edge filter — presumably needed so the
    # (p, q) tuple can be unpacked in the body (tuple-unpacking lambdas were
    # removed in Python 3); TODO confirm.
    def edge_filter(pq):
        # we have to unpack the (p,q) edge
        p, q = pq
        # keep each edge once (p < q) and only if both endpoints survived
        return q in qubits and p < q
    qubits = [(x, y, u, k) for x in range(M) for y in range(N) for u in [0, 1] for k in range(L)]
    nqubits = len(qubits)
    # randomly keep a qubit_yield fraction of the qubits
    qubits = set(sample(qubits, int(nqubits * qubit_yield)))
    edges = ((p, q) for p in qubits for q in _chimera_neighbors(M, N, L, p))
    edges = list(filter(edge_filter, edges))
    # in-cell couplers join qubits sharing the same (x, y) unit cell
    possibly_evil_edges = [(p, q) for p, q in edges if p[:2] == q[:2]]
    # cannot break more couplers than actually exist
    num_evil = min(num_evil, len(possibly_evil_edges))
    evil_edges = sample(possibly_evil_edges, num_evil)
return processor(set(edges) - set(evil_edges), M=M, N=N, L=L, linear=False) | def function[random_processor, parameter[M, N, L, qubit_yield, num_evil]]:
constant[A utility function that generates a random :math:`C_{M,N,L}` missing some
percentage of its qubits.
INPUTS:
M,N,L: the chimera parameters
qubit_yield: ratio (0 <= qubit_yield <= 1) of #{qubits}/(2*M*N*L)
num_evil: number of broken in-cell couplers between working qubits
OUTPUT:
proc (:class:`processor`): a :class:`processor` instance with a random
collection of qubits and couplers as specified
]
def function[edge_filter, parameter[pq]]:
<ast.Tuple object at 0x7da1b0f06920> assign[=] name[pq]
return[<ast.BoolOp object at 0x7da1b0f05210>]
variable[qubits] assign[=] <ast.ListComp object at 0x7da1b0f05630>
variable[nqubits] assign[=] call[name[len], parameter[name[qubits]]]
variable[qubits] assign[=] call[name[set], parameter[call[name[sample], parameter[name[qubits], call[name[int], parameter[binary_operation[name[nqubits] * name[qubit_yield]]]]]]]]
variable[edges] assign[=] <ast.GeneratorExp object at 0x7da1b0f06e90>
variable[edges] assign[=] call[name[list], parameter[call[name[filter], parameter[name[edge_filter], name[edges]]]]]
variable[possibly_evil_edges] assign[=] <ast.ListComp object at 0x7da1b0f04580>
variable[num_evil] assign[=] call[name[min], parameter[name[num_evil], call[name[len], parameter[name[possibly_evil_edges]]]]]
variable[evil_edges] assign[=] call[name[sample], parameter[name[possibly_evil_edges], name[num_evil]]]
return[call[name[processor], parameter[binary_operation[call[name[set], parameter[name[edges]]] - call[name[set], parameter[name[evil_edges]]]]]]] | keyword[def] identifier[random_processor] ( identifier[M] , identifier[N] , identifier[L] , identifier[qubit_yield] , identifier[num_evil] = literal[int] ):
literal[string]
keyword[def] identifier[edge_filter] ( identifier[pq] ):
identifier[p] , identifier[q] = identifier[pq]
keyword[return] identifier[q] keyword[in] identifier[qubits] keyword[and] identifier[p] < identifier[q]
identifier[qubits] =[( identifier[x] , identifier[y] , identifier[u] , identifier[k] ) keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[M] ) keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[N] ) keyword[for] identifier[u] keyword[in] [ literal[int] , literal[int] ] keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[L] )]
identifier[nqubits] = identifier[len] ( identifier[qubits] )
identifier[qubits] = identifier[set] ( identifier[sample] ( identifier[qubits] , identifier[int] ( identifier[nqubits] * identifier[qubit_yield] )))
identifier[edges] =(( identifier[p] , identifier[q] ) keyword[for] identifier[p] keyword[in] identifier[qubits] keyword[for] identifier[q] keyword[in] identifier[_chimera_neighbors] ( identifier[M] , identifier[N] , identifier[L] , identifier[p] ))
identifier[edges] = identifier[list] ( identifier[filter] ( identifier[edge_filter] , identifier[edges] ))
identifier[possibly_evil_edges] =[( identifier[p] , identifier[q] ) keyword[for] identifier[p] , identifier[q] keyword[in] identifier[edges] keyword[if] identifier[p] [: literal[int] ]== identifier[q] [: literal[int] ]]
identifier[num_evil] = identifier[min] ( identifier[num_evil] , identifier[len] ( identifier[possibly_evil_edges] ))
identifier[evil_edges] = identifier[sample] ( identifier[possibly_evil_edges] , identifier[num_evil] )
keyword[return] identifier[processor] ( identifier[set] ( identifier[edges] )- identifier[set] ( identifier[evil_edges] ), identifier[M] = identifier[M] , identifier[N] = identifier[N] , identifier[L] = identifier[L] , identifier[linear] = keyword[False] ) | def random_processor(M, N, L, qubit_yield, num_evil=0):
"""A utility function that generates a random :math:`C_{M,N,L}` missing some
percentage of its qubits.
INPUTS:
M,N,L: the chimera parameters
qubit_yield: ratio (0 <= qubit_yield <= 1) of #{qubits}/(2*M*N*L)
num_evil: number of broken in-cell couplers between working qubits
OUTPUT:
proc (:class:`processor`): a :class:`processor` instance with a random
collection of qubits and couplers as specified
"""
# replacement for lambda in edge filter below that works with bot h
def edge_filter(pq):
# we have to unpack the (p,q) edge
(p, q) = pq
return q in qubits and p < q
qubits = [(x, y, u, k) for x in range(M) for y in range(N) for u in [0, 1] for k in range(L)]
nqubits = len(qubits)
qubits = set(sample(qubits, int(nqubits * qubit_yield)))
edges = ((p, q) for p in qubits for q in _chimera_neighbors(M, N, L, p))
edges = list(filter(edge_filter, edges))
possibly_evil_edges = [(p, q) for (p, q) in edges if p[:2] == q[:2]]
num_evil = min(num_evil, len(possibly_evil_edges))
evil_edges = sample(possibly_evil_edges, num_evil)
return processor(set(edges) - set(evil_edges), M=M, N=N, L=L, linear=False) |
def _create_merge_filelist(bam_files, base_file, config):
    """Create list of input files for merge, ensuring all files are valid.

    Writes one BAM path per line (sorted) next to *base_file* and returns
    the path of the written list file.
    """
    bam_file_list = "%s.list" % os.path.splitext(base_file)[0]
    samtools = config_utils.get_program("samtools", config)
    with open(bam_file_list, "w") as out_handle:
        for f in sorted(bam_files):
            # 'samtools quickcheck -v' fails on truncated/corrupt BAMs,
            # aborting the merge before bad input is consumed.
            do.run('{} quickcheck -v {}'.format(samtools, f),
                   "Ensure integrity of input merge BAM files")
            out_handle.write("%s\n" % f)
return bam_file_list | def function[_create_merge_filelist, parameter[bam_files, base_file, config]]:
constant[Create list of input files for merge, ensuring all files are valid.
]
variable[bam_file_list] assign[=] binary_operation[constant[%s.list] <ast.Mod object at 0x7da2590d6920> call[call[name[os].path.splitext, parameter[name[base_file]]]][constant[0]]]
variable[samtools] assign[=] call[name[config_utils].get_program, parameter[constant[samtools], name[config]]]
with call[name[open], parameter[name[bam_file_list], constant[w]]] begin[:]
for taget[name[f]] in starred[call[name[sorted], parameter[name[bam_files]]]] begin[:]
call[name[do].run, parameter[call[constant[{} quickcheck -v {}].format, parameter[name[samtools], name[f]]], constant[Ensure integrity of input merge BAM files]]]
call[name[out_handle].write, parameter[binary_operation[constant[%s
] <ast.Mod object at 0x7da2590d6920> name[f]]]]
return[name[bam_file_list]] | keyword[def] identifier[_create_merge_filelist] ( identifier[bam_files] , identifier[base_file] , identifier[config] ):
literal[string]
identifier[bam_file_list] = literal[string] % identifier[os] . identifier[path] . identifier[splitext] ( identifier[base_file] )[ literal[int] ]
identifier[samtools] = identifier[config_utils] . identifier[get_program] ( literal[string] , identifier[config] )
keyword[with] identifier[open] ( identifier[bam_file_list] , literal[string] ) keyword[as] identifier[out_handle] :
keyword[for] identifier[f] keyword[in] identifier[sorted] ( identifier[bam_files] ):
identifier[do] . identifier[run] ( literal[string] . identifier[format] ( identifier[samtools] , identifier[f] ),
literal[string] )
identifier[out_handle] . identifier[write] ( literal[string] % identifier[f] )
keyword[return] identifier[bam_file_list] | def _create_merge_filelist(bam_files, base_file, config):
"""Create list of input files for merge, ensuring all files are valid.
"""
bam_file_list = '%s.list' % os.path.splitext(base_file)[0]
samtools = config_utils.get_program('samtools', config)
with open(bam_file_list, 'w') as out_handle:
for f in sorted(bam_files):
do.run('{} quickcheck -v {}'.format(samtools, f), 'Ensure integrity of input merge BAM files')
out_handle.write('%s\n' % f) # depends on [control=['for'], data=['f']] # depends on [control=['with'], data=['out_handle']]
return bam_file_list |
def first_n_items(array, n_desired):
"""Returns the first n_desired items of an array"""
# Unfortunately, we can't just do array.flat[:n_desired] here because it
# might not be a numpy.ndarray. Moreover, access to elements of the array
# could be very expensive (e.g. if it's only available over DAP), so go out
# of our way to get them in a single call to __getitem__ using only slices.
if n_desired < 1:
raise ValueError('must request at least one item')
if array.size == 0:
# work around for https://github.com/numpy/numpy/issues/5195
return []
if n_desired < array.size:
indexer = _get_indexer_at_least_n_items(array.shape, n_desired,
from_end=False)
array = array[indexer]
return np.asarray(array).flat[:n_desired] | def function[first_n_items, parameter[array, n_desired]]:
constant[Returns the first n_desired items of an array]
if compare[name[n_desired] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da18f00d810>
if compare[name[array].size equal[==] constant[0]] begin[:]
return[list[[]]]
if compare[name[n_desired] less[<] name[array].size] begin[:]
variable[indexer] assign[=] call[name[_get_indexer_at_least_n_items], parameter[name[array].shape, name[n_desired]]]
variable[array] assign[=] call[name[array]][name[indexer]]
return[call[call[name[np].asarray, parameter[name[array]]].flat][<ast.Slice object at 0x7da18f00d1b0>]] | keyword[def] identifier[first_n_items] ( identifier[array] , identifier[n_desired] ):
literal[string]
keyword[if] identifier[n_desired] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[array] . identifier[size] == literal[int] :
keyword[return] []
keyword[if] identifier[n_desired] < identifier[array] . identifier[size] :
identifier[indexer] = identifier[_get_indexer_at_least_n_items] ( identifier[array] . identifier[shape] , identifier[n_desired] ,
identifier[from_end] = keyword[False] )
identifier[array] = identifier[array] [ identifier[indexer] ]
keyword[return] identifier[np] . identifier[asarray] ( identifier[array] ). identifier[flat] [: identifier[n_desired] ] | def first_n_items(array, n_desired):
"""Returns the first n_desired items of an array"""
# Unfortunately, we can't just do array.flat[:n_desired] here because it
# might not be a numpy.ndarray. Moreover, access to elements of the array
# could be very expensive (e.g. if it's only available over DAP), so go out
# of our way to get them in a single call to __getitem__ using only slices.
if n_desired < 1:
raise ValueError('must request at least one item') # depends on [control=['if'], data=[]]
if array.size == 0:
# work around for https://github.com/numpy/numpy/issues/5195
return [] # depends on [control=['if'], data=[]]
if n_desired < array.size:
indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=False)
array = array[indexer] # depends on [control=['if'], data=['n_desired']]
return np.asarray(array).flat[:n_desired] |
def receive_forever(self):
"""
Receive and handle messages in an endless loop. A message may consist
of multiple data frames, but this is not visible for onmessage().
Control messages (or control frames) are handled automatically.
"""
while True:
try:
self.onmessage(self.recv())
except (KeyboardInterrupt, SystemExit, SocketClosed):
break
except Exception as e:
self.onerror(e)
self.onclose(None, 'error: %s' % e)
try:
self.sock.close()
except socket.error:
pass
raise e | def function[receive_forever, parameter[self]]:
constant[
Receive and handle messages in an endless loop. A message may consist
of multiple data frames, but this is not visible for onmessage().
Control messages (or control frames) are handled automatically.
]
while constant[True] begin[:]
<ast.Try object at 0x7da1b0b72c50> | keyword[def] identifier[receive_forever] ( identifier[self] ):
literal[string]
keyword[while] keyword[True] :
keyword[try] :
identifier[self] . identifier[onmessage] ( identifier[self] . identifier[recv] ())
keyword[except] ( identifier[KeyboardInterrupt] , identifier[SystemExit] , identifier[SocketClosed] ):
keyword[break]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[onerror] ( identifier[e] )
identifier[self] . identifier[onclose] ( keyword[None] , literal[string] % identifier[e] )
keyword[try] :
identifier[self] . identifier[sock] . identifier[close] ()
keyword[except] identifier[socket] . identifier[error] :
keyword[pass]
keyword[raise] identifier[e] | def receive_forever(self):
"""
Receive and handle messages in an endless loop. A message may consist
of multiple data frames, but this is not visible for onmessage().
Control messages (or control frames) are handled automatically.
"""
while True:
try:
self.onmessage(self.recv()) # depends on [control=['try'], data=[]]
except (KeyboardInterrupt, SystemExit, SocketClosed):
break # depends on [control=['except'], data=[]]
except Exception as e:
self.onerror(e)
self.onclose(None, 'error: %s' % e)
try:
self.sock.close() # depends on [control=['try'], data=[]]
except socket.error:
pass # depends on [control=['except'], data=[]]
raise e # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]] |
def val(self, name):
"""
retrieves a value, substituting actual
values for ConfigValue templates.
"""
v = getattr(self, name)
if hasattr(v, 'retrieve_value'):
v = v.retrieve_value(self.__dict__)
return v | def function[val, parameter[self, name]]:
constant[
retrieves a value, substituting actual
values for ConfigValue templates.
]
variable[v] assign[=] call[name[getattr], parameter[name[self], name[name]]]
if call[name[hasattr], parameter[name[v], constant[retrieve_value]]] begin[:]
variable[v] assign[=] call[name[v].retrieve_value, parameter[name[self].__dict__]]
return[name[v]] | keyword[def] identifier[val] ( identifier[self] , identifier[name] ):
literal[string]
identifier[v] = identifier[getattr] ( identifier[self] , identifier[name] )
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[retrieve_value] ( identifier[self] . identifier[__dict__] )
keyword[return] identifier[v] | def val(self, name):
"""
retrieves a value, substituting actual
values for ConfigValue templates.
"""
v = getattr(self, name)
if hasattr(v, 'retrieve_value'):
v = v.retrieve_value(self.__dict__) # depends on [control=['if'], data=[]]
return v |
def launch_browser(self, soup):
"""Launch a browser to display a page, for debugging purposes.
:param: soup: Page contents to display, supplied as a bs4 soup object.
"""
with tempfile.NamedTemporaryFile(delete=False, suffix='.html') as file:
file.write(soup.encode())
webbrowser.open('file://' + file.name) | def function[launch_browser, parameter[self, soup]]:
constant[Launch a browser to display a page, for debugging purposes.
:param: soup: Page contents to display, supplied as a bs4 soup object.
]
with call[name[tempfile].NamedTemporaryFile, parameter[]] begin[:]
call[name[file].write, parameter[call[name[soup].encode, parameter[]]]]
call[name[webbrowser].open, parameter[binary_operation[constant[file://] + name[file].name]]] | keyword[def] identifier[launch_browser] ( identifier[self] , identifier[soup] ):
literal[string]
keyword[with] identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[delete] = keyword[False] , identifier[suffix] = literal[string] ) keyword[as] identifier[file] :
identifier[file] . identifier[write] ( identifier[soup] . identifier[encode] ())
identifier[webbrowser] . identifier[open] ( literal[string] + identifier[file] . identifier[name] ) | def launch_browser(self, soup):
"""Launch a browser to display a page, for debugging purposes.
:param: soup: Page contents to display, supplied as a bs4 soup object.
"""
with tempfile.NamedTemporaryFile(delete=False, suffix='.html') as file:
file.write(soup.encode()) # depends on [control=['with'], data=['file']]
webbrowser.open('file://' + file.name) |
def get_data (self):
"""Get bufferd unicode data."""
data = u"".join(self.buf)
self.buf = []
return data | def function[get_data, parameter[self]]:
constant[Get bufferd unicode data.]
variable[data] assign[=] call[constant[].join, parameter[name[self].buf]]
name[self].buf assign[=] list[[]]
return[name[data]] | keyword[def] identifier[get_data] ( identifier[self] ):
literal[string]
identifier[data] = literal[string] . identifier[join] ( identifier[self] . identifier[buf] )
identifier[self] . identifier[buf] =[]
keyword[return] identifier[data] | def get_data(self):
"""Get bufferd unicode data."""
data = u''.join(self.buf)
self.buf = []
return data |
def document_frequencies(self, hashes):
'''Get document frequencies for a list of hashes.
This will return all zeros unless the index was written with
`hash_frequencies` set. If :data:`DOCUMENT_HASH_KEY` is
included in `hashes`, that value will be returned with the
total number of documents indexed. If you are looking for
documents with that hash, pass
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param hashes: hashes to query
:paramtype hashes: list of :class:`int`
:return: map from hash to document frequency
'''
result = {}
for (k, v) in self.client.get(HASH_FREQUENCY_TABLE,
*[(h,) for h in hashes]):
if v is None:
v = 0
result[k[0]] = v
return result | def function[document_frequencies, parameter[self, hashes]]:
constant[Get document frequencies for a list of hashes.
This will return all zeros unless the index was written with
`hash_frequencies` set. If :data:`DOCUMENT_HASH_KEY` is
included in `hashes`, that value will be returned with the
total number of documents indexed. If you are looking for
documents with that hash, pass
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param hashes: hashes to query
:paramtype hashes: list of :class:`int`
:return: map from hash to document frequency
]
variable[result] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0290280>, <ast.Name object at 0x7da1b02902b0>]]] in starred[call[name[self].client.get, parameter[name[HASH_FREQUENCY_TABLE], <ast.Starred object at 0x7da1b0290ca0>]]] begin[:]
if compare[name[v] is constant[None]] begin[:]
variable[v] assign[=] constant[0]
call[name[result]][call[name[k]][constant[0]]] assign[=] name[v]
return[name[result]] | keyword[def] identifier[document_frequencies] ( identifier[self] , identifier[hashes] ):
literal[string]
identifier[result] ={}
keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[self] . identifier[client] . identifier[get] ( identifier[HASH_FREQUENCY_TABLE] ,
*[( identifier[h] ,) keyword[for] identifier[h] keyword[in] identifier[hashes] ]):
keyword[if] identifier[v] keyword[is] keyword[None] :
identifier[v] = literal[int]
identifier[result] [ identifier[k] [ literal[int] ]]= identifier[v]
keyword[return] identifier[result] | def document_frequencies(self, hashes):
"""Get document frequencies for a list of hashes.
This will return all zeros unless the index was written with
`hash_frequencies` set. If :data:`DOCUMENT_HASH_KEY` is
included in `hashes`, that value will be returned with the
total number of documents indexed. If you are looking for
documents with that hash, pass
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param hashes: hashes to query
:paramtype hashes: list of :class:`int`
:return: map from hash to document frequency
"""
result = {}
for (k, v) in self.client.get(HASH_FREQUENCY_TABLE, *[(h,) for h in hashes]):
if v is None:
v = 0 # depends on [control=['if'], data=['v']]
result[k[0]] = v # depends on [control=['for'], data=[]]
return result |
def psstatus():
"""Shows PokerStars status such as number of players, tournaments."""
from .website.pokerstars import get_status
_print_header('PokerStars status')
status = get_status()
_print_values(
('Info updated', status.updated),
('Tables', status.tables),
('Players', status.players),
('Active tournaments', status.active_tournaments),
('Total tournaments', status.total_tournaments),
('Clubs', status.clubs),
('Club members', status.club_members),
)
site_format_str = '{0.id:<12} {0.tables:<7,} {0.players:<8,} {0.active_tournaments:,}'
click.echo('\nSite Tables Players Tournaments')
click.echo('----------- ------ ------- -----------')
for site in status.sites:
click.echo(site_format_str.format(site)) | def function[psstatus, parameter[]]:
constant[Shows PokerStars status such as number of players, tournaments.]
from relative_module[website.pokerstars] import module[get_status]
call[name[_print_header], parameter[constant[PokerStars status]]]
variable[status] assign[=] call[name[get_status], parameter[]]
call[name[_print_values], parameter[tuple[[<ast.Constant object at 0x7da1b15f12d0>, <ast.Attribute object at 0x7da1b15f2e30>]], tuple[[<ast.Constant object at 0x7da1b15f01c0>, <ast.Attribute object at 0x7da1b15f03d0>]], tuple[[<ast.Constant object at 0x7da1b15f1570>, <ast.Attribute object at 0x7da1b15f2c20>]], tuple[[<ast.Constant object at 0x7da1b15f1630>, <ast.Attribute object at 0x7da1b15f3220>]], tuple[[<ast.Constant object at 0x7da1b15f1180>, <ast.Attribute object at 0x7da1b15f05b0>]], tuple[[<ast.Constant object at 0x7da1b15f0910>, <ast.Attribute object at 0x7da1b15f12a0>]], tuple[[<ast.Constant object at 0x7da1b15f1a50>, <ast.Attribute object at 0x7da1b15f0f70>]]]]
variable[site_format_str] assign[=] constant[{0.id:<12} {0.tables:<7,} {0.players:<8,} {0.active_tournaments:,}]
call[name[click].echo, parameter[constant[
Site Tables Players Tournaments]]]
call[name[click].echo, parameter[constant[----------- ------ ------- -----------]]]
for taget[name[site]] in starred[name[status].sites] begin[:]
call[name[click].echo, parameter[call[name[site_format_str].format, parameter[name[site]]]]] | keyword[def] identifier[psstatus] ():
literal[string]
keyword[from] . identifier[website] . identifier[pokerstars] keyword[import] identifier[get_status]
identifier[_print_header] ( literal[string] )
identifier[status] = identifier[get_status] ()
identifier[_print_values] (
( literal[string] , identifier[status] . identifier[updated] ),
( literal[string] , identifier[status] . identifier[tables] ),
( literal[string] , identifier[status] . identifier[players] ),
( literal[string] , identifier[status] . identifier[active_tournaments] ),
( literal[string] , identifier[status] . identifier[total_tournaments] ),
( literal[string] , identifier[status] . identifier[clubs] ),
( literal[string] , identifier[status] . identifier[club_members] ),
)
identifier[site_format_str] = literal[string]
identifier[click] . identifier[echo] ( literal[string] )
identifier[click] . identifier[echo] ( literal[string] )
keyword[for] identifier[site] keyword[in] identifier[status] . identifier[sites] :
identifier[click] . identifier[echo] ( identifier[site_format_str] . identifier[format] ( identifier[site] )) | def psstatus():
"""Shows PokerStars status such as number of players, tournaments."""
from .website.pokerstars import get_status
_print_header('PokerStars status')
status = get_status()
_print_values(('Info updated', status.updated), ('Tables', status.tables), ('Players', status.players), ('Active tournaments', status.active_tournaments), ('Total tournaments', status.total_tournaments), ('Clubs', status.clubs), ('Club members', status.club_members))
site_format_str = '{0.id:<12} {0.tables:<7,} {0.players:<8,} {0.active_tournaments:,}'
click.echo('\nSite Tables Players Tournaments')
click.echo('----------- ------ ------- -----------')
for site in status.sites:
click.echo(site_format_str.format(site)) # depends on [control=['for'], data=['site']] |
def renderInTable(self, relpath=""):
"""renderInTable() is called to render FITS images in a table"""
# return from cache if available
cachekey, html = self.checkCache('InTable', relpath)
if html is not None:
return html
# else regenerate
# single image: render as standard cells
if len(self.imgrec) == 1:
rec = self.imgrec[0]
# add header
html = " <TR><TD COLSPAN=2>"
html += self.renderLinkComment(relpath) or ""
html += "</TD></TR>\n"
html_img, comment = self._renderImageRec(rec, relpath, include_size=True)
html += "\n".join([
" <TR>",
" <TD>%s</TD>" % html_img,
" <TD>%s</TD>" % comment,
" </TR>\n"])
# multiple images: render a single header row, followed by one row per image
else:
# add header
html = " <TR><TD COLSPAN=2>"
html += self.renderLinkComment(relpath)
# append information on image and end the table row
html += "\n <DIV ALIGN=right><P>%s FITS cube, %d planes are given below.</P></DIV></TD></TR>\n" % (
self.cubesize, len(self.imgrec))
# now loop over images and generate a table row for each
for irec, rec in enumerate(self.imgrec):
html_img, comment = self._renderImageRec(rec, relpath)
comment = "<P>Image plane #%d.</P>%s" % (irec, comment)
html += "\n".join([
" <TR>",
" <TD>%s</TD>" % html_img,
" <TD>%s</TD>" % comment,
" </TR>\n"])
return self.writeCache(cachekey, html) | def function[renderInTable, parameter[self, relpath]]:
constant[renderInTable() is called to render FITS images in a table]
<ast.Tuple object at 0x7da1b0964dc0> assign[=] call[name[self].checkCache, parameter[constant[InTable], name[relpath]]]
if compare[name[html] is_not constant[None]] begin[:]
return[name[html]]
if compare[call[name[len], parameter[name[self].imgrec]] equal[==] constant[1]] begin[:]
variable[rec] assign[=] call[name[self].imgrec][constant[0]]
variable[html] assign[=] constant[ <TR><TD COLSPAN=2>]
<ast.AugAssign object at 0x7da1b09662f0>
<ast.AugAssign object at 0x7da1b09659c0>
<ast.Tuple object at 0x7da1b0965ae0> assign[=] call[name[self]._renderImageRec, parameter[name[rec], name[relpath]]]
<ast.AugAssign object at 0x7da1b0966020>
return[call[name[self].writeCache, parameter[name[cachekey], name[html]]]] | keyword[def] identifier[renderInTable] ( identifier[self] , identifier[relpath] = literal[string] ):
literal[string]
identifier[cachekey] , identifier[html] = identifier[self] . identifier[checkCache] ( literal[string] , identifier[relpath] )
keyword[if] identifier[html] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[html]
keyword[if] identifier[len] ( identifier[self] . identifier[imgrec] )== literal[int] :
identifier[rec] = identifier[self] . identifier[imgrec] [ literal[int] ]
identifier[html] = literal[string]
identifier[html] += identifier[self] . identifier[renderLinkComment] ( identifier[relpath] ) keyword[or] literal[string]
identifier[html] += literal[string]
identifier[html_img] , identifier[comment] = identifier[self] . identifier[_renderImageRec] ( identifier[rec] , identifier[relpath] , identifier[include_size] = keyword[True] )
identifier[html] += literal[string] . identifier[join] ([
literal[string] ,
literal[string] % identifier[html_img] ,
literal[string] % identifier[comment] ,
literal[string] ])
keyword[else] :
identifier[html] = literal[string]
identifier[html] += identifier[self] . identifier[renderLinkComment] ( identifier[relpath] )
identifier[html] += literal[string] %(
identifier[self] . identifier[cubesize] , identifier[len] ( identifier[self] . identifier[imgrec] ))
keyword[for] identifier[irec] , identifier[rec] keyword[in] identifier[enumerate] ( identifier[self] . identifier[imgrec] ):
identifier[html_img] , identifier[comment] = identifier[self] . identifier[_renderImageRec] ( identifier[rec] , identifier[relpath] )
identifier[comment] = literal[string] %( identifier[irec] , identifier[comment] )
identifier[html] += literal[string] . identifier[join] ([
literal[string] ,
literal[string] % identifier[html_img] ,
literal[string] % identifier[comment] ,
literal[string] ])
keyword[return] identifier[self] . identifier[writeCache] ( identifier[cachekey] , identifier[html] ) | def renderInTable(self, relpath=''):
"""renderInTable() is called to render FITS images in a table"""
# return from cache if available
(cachekey, html) = self.checkCache('InTable', relpath)
if html is not None:
return html # depends on [control=['if'], data=['html']]
# else regenerate
# single image: render as standard cells
if len(self.imgrec) == 1:
rec = self.imgrec[0]
# add header
html = ' <TR><TD COLSPAN=2>'
html += self.renderLinkComment(relpath) or ''
html += '</TD></TR>\n'
(html_img, comment) = self._renderImageRec(rec, relpath, include_size=True)
html += '\n'.join([' <TR>', ' <TD>%s</TD>' % html_img, ' <TD>%s</TD>' % comment, ' </TR>\n']) # depends on [control=['if'], data=[]]
else:
# multiple images: render a single header row, followed by one row per image
# add header
html = ' <TR><TD COLSPAN=2>'
html += self.renderLinkComment(relpath)
# append information on image and end the table row
html += '\n <DIV ALIGN=right><P>%s FITS cube, %d planes are given below.</P></DIV></TD></TR>\n' % (self.cubesize, len(self.imgrec))
# now loop over images and generate a table row for each
for (irec, rec) in enumerate(self.imgrec):
(html_img, comment) = self._renderImageRec(rec, relpath)
comment = '<P>Image plane #%d.</P>%s' % (irec, comment)
html += '\n'.join([' <TR>', ' <TD>%s</TD>' % html_img, ' <TD>%s</TD>' % comment, ' </TR>\n']) # depends on [control=['for'], data=[]]
return self.writeCache(cachekey, html) |
def mmGetPlotUnionSDRActivity(self, title="Union SDR Activity Raster",
showReset=False, resetShading=0.25):
""" Returns plot of the activity of union SDR bits.
@param title an optional title for the figure
@param showReset if true, the first set of activities after a reset
will have a gray background
@param resetShading If showReset is true, this float specifies the
intensity of the reset background with 0.0 being white and 1.0 being black
@return (Plot) plot
"""
unionSDRTrace = self.mmGetTraceUnionSDR().data
columnCount = self.getNumColumns()
activityType = "Union SDR Activity"
return self.mmGetCellTracePlot(unionSDRTrace, columnCount, activityType,
title=title, showReset=showReset,
resetShading=resetShading) | def function[mmGetPlotUnionSDRActivity, parameter[self, title, showReset, resetShading]]:
constant[ Returns plot of the activity of union SDR bits.
@param title an optional title for the figure
@param showReset if true, the first set of activities after a reset
will have a gray background
@param resetShading If showReset is true, this float specifies the
intensity of the reset background with 0.0 being white and 1.0 being black
@return (Plot) plot
]
variable[unionSDRTrace] assign[=] call[name[self].mmGetTraceUnionSDR, parameter[]].data
variable[columnCount] assign[=] call[name[self].getNumColumns, parameter[]]
variable[activityType] assign[=] constant[Union SDR Activity]
return[call[name[self].mmGetCellTracePlot, parameter[name[unionSDRTrace], name[columnCount], name[activityType]]]] | keyword[def] identifier[mmGetPlotUnionSDRActivity] ( identifier[self] , identifier[title] = literal[string] ,
identifier[showReset] = keyword[False] , identifier[resetShading] = literal[int] ):
literal[string]
identifier[unionSDRTrace] = identifier[self] . identifier[mmGetTraceUnionSDR] (). identifier[data]
identifier[columnCount] = identifier[self] . identifier[getNumColumns] ()
identifier[activityType] = literal[string]
keyword[return] identifier[self] . identifier[mmGetCellTracePlot] ( identifier[unionSDRTrace] , identifier[columnCount] , identifier[activityType] ,
identifier[title] = identifier[title] , identifier[showReset] = identifier[showReset] ,
identifier[resetShading] = identifier[resetShading] ) | def mmGetPlotUnionSDRActivity(self, title='Union SDR Activity Raster', showReset=False, resetShading=0.25):
""" Returns plot of the activity of union SDR bits.
@param title an optional title for the figure
@param showReset if true, the first set of activities after a reset
will have a gray background
@param resetShading If showReset is true, this float specifies the
intensity of the reset background with 0.0 being white and 1.0 being black
@return (Plot) plot
"""
unionSDRTrace = self.mmGetTraceUnionSDR().data
columnCount = self.getNumColumns()
activityType = 'Union SDR Activity'
return self.mmGetCellTracePlot(unionSDRTrace, columnCount, activityType, title=title, showReset=showReset, resetShading=resetShading) |
def unit(self, expression, default=None):
"""Returns the unit (an astropy.unit.Units object) for the expression.
Example
>>> import vaex
>>> ds = vaex.example()
>>> df.unit("x")
Unit("kpc")
>>> df.unit("x*L")
Unit("km kpc2 / s")
:param expression: Expression, which can be a column name
:param default: if no unit is known, it will return this
:return: The resulting unit of the expression
:rtype: astropy.units.Unit
"""
expression = _ensure_string_from_expression(expression)
try:
# if an expression like pi * <some_expr> it will evaluate to a quantity instead of a unit
unit_or_quantity = eval(expression, expression_namespace, scopes.UnitScope(self))
unit = unit_or_quantity.unit if hasattr(unit_or_quantity, "unit") else unit_or_quantity
return unit if isinstance(unit, astropy.units.Unit) else None
except:
# logger.exception("error evaluating unit expression: %s", expression)
# astropy doesn't add units, so we try with a quatiti
try:
return eval(expression, expression_namespace, scopes.UnitScope(self, 1.)).unit
except:
# logger.exception("error evaluating unit expression: %s", expression)
return default | def function[unit, parameter[self, expression, default]]:
constant[Returns the unit (an astropy.unit.Units object) for the expression.
Example
>>> import vaex
>>> ds = vaex.example()
>>> df.unit("x")
Unit("kpc")
>>> df.unit("x*L")
Unit("km kpc2 / s")
:param expression: Expression, which can be a column name
:param default: if no unit is known, it will return this
:return: The resulting unit of the expression
:rtype: astropy.units.Unit
]
variable[expression] assign[=] call[name[_ensure_string_from_expression], parameter[name[expression]]]
<ast.Try object at 0x7da18bc711b0> | keyword[def] identifier[unit] ( identifier[self] , identifier[expression] , identifier[default] = keyword[None] ):
literal[string]
identifier[expression] = identifier[_ensure_string_from_expression] ( identifier[expression] )
keyword[try] :
identifier[unit_or_quantity] = identifier[eval] ( identifier[expression] , identifier[expression_namespace] , identifier[scopes] . identifier[UnitScope] ( identifier[self] ))
identifier[unit] = identifier[unit_or_quantity] . identifier[unit] keyword[if] identifier[hasattr] ( identifier[unit_or_quantity] , literal[string] ) keyword[else] identifier[unit_or_quantity]
keyword[return] identifier[unit] keyword[if] identifier[isinstance] ( identifier[unit] , identifier[astropy] . identifier[units] . identifier[Unit] ) keyword[else] keyword[None]
keyword[except] :
keyword[try] :
keyword[return] identifier[eval] ( identifier[expression] , identifier[expression_namespace] , identifier[scopes] . identifier[UnitScope] ( identifier[self] , literal[int] )). identifier[unit]
keyword[except] :
keyword[return] identifier[default] | def unit(self, expression, default=None):
"""Returns the unit (an astropy.unit.Units object) for the expression.
Example
>>> import vaex
>>> ds = vaex.example()
>>> df.unit("x")
Unit("kpc")
>>> df.unit("x*L")
Unit("km kpc2 / s")
:param expression: Expression, which can be a column name
:param default: if no unit is known, it will return this
:return: The resulting unit of the expression
:rtype: astropy.units.Unit
"""
expression = _ensure_string_from_expression(expression)
try:
# if an expression like pi * <some_expr> it will evaluate to a quantity instead of a unit
unit_or_quantity = eval(expression, expression_namespace, scopes.UnitScope(self))
unit = unit_or_quantity.unit if hasattr(unit_or_quantity, 'unit') else unit_or_quantity
return unit if isinstance(unit, astropy.units.Unit) else None # depends on [control=['try'], data=[]]
except:
# logger.exception("error evaluating unit expression: %s", expression)
# astropy doesn't add units, so we try with a quatiti
try:
return eval(expression, expression_namespace, scopes.UnitScope(self, 1.0)).unit # depends on [control=['try'], data=[]]
except:
# logger.exception("error evaluating unit expression: %s", expression)
return default # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] |
def start_notebook(self, name, context: dict, fg=False):
"""Start new IPython Notebook daemon.
:param name: The owner of the Notebook will be *name*. He/she gets a new Notebook content folder created where all files are placed.
:param context: Extra context information passed to the started Notebook. This must contain {context_hash:int} parameter used to identify the launch parameters for the notebook
"""
assert context
assert type(context) == dict
assert "context_hash" in context
assert type(context["context_hash"]) == int
http_port = self.pick_port()
assert http_port
context = context.copy()
context["http_port"] = http_port
# We can't proxy websocket URLs, so let them go directly through localhost or have front end server to do proxying (nginx)
if "websocket_url" not in context:
context["websocket_url"] = "ws://localhost:{port}".format(port=http_port)
if "{port}" in context["websocket_url"]:
# Do port substitution for the websocket URL
context["websocket_url"] = context["websocket_url"].format(port=http_port)
pid = self.get_pid(name)
assert "terminated" not in context
comm.set_context(pid, context)
if fg:
self.exec_notebook_daemon_command(name, "fg", port=http_port)
else:
self.exec_notebook_daemon_command(name, "start", port=http_port) | def function[start_notebook, parameter[self, name, context, fg]]:
constant[Start new IPython Notebook daemon.
:param name: The owner of the Notebook will be *name*. He/she gets a new Notebook content folder created where all files are placed.
:param context: Extra context information passed to the started Notebook. This must contain {context_hash:int} parameter used to identify the launch parameters for the notebook
]
assert[name[context]]
assert[compare[call[name[type], parameter[name[context]]] equal[==] name[dict]]]
assert[compare[constant[context_hash] in name[context]]]
assert[compare[call[name[type], parameter[call[name[context]][constant[context_hash]]]] equal[==] name[int]]]
variable[http_port] assign[=] call[name[self].pick_port, parameter[]]
assert[name[http_port]]
variable[context] assign[=] call[name[context].copy, parameter[]]
call[name[context]][constant[http_port]] assign[=] name[http_port]
if compare[constant[websocket_url] <ast.NotIn object at 0x7da2590d7190> name[context]] begin[:]
call[name[context]][constant[websocket_url]] assign[=] call[constant[ws://localhost:{port}].format, parameter[]]
if compare[constant[{port}] in call[name[context]][constant[websocket_url]]] begin[:]
call[name[context]][constant[websocket_url]] assign[=] call[call[name[context]][constant[websocket_url]].format, parameter[]]
variable[pid] assign[=] call[name[self].get_pid, parameter[name[name]]]
assert[compare[constant[terminated] <ast.NotIn object at 0x7da2590d7190> name[context]]]
call[name[comm].set_context, parameter[name[pid], name[context]]]
if name[fg] begin[:]
call[name[self].exec_notebook_daemon_command, parameter[name[name], constant[fg]]] | keyword[def] identifier[start_notebook] ( identifier[self] , identifier[name] , identifier[context] : identifier[dict] , identifier[fg] = keyword[False] ):
literal[string]
keyword[assert] identifier[context]
keyword[assert] identifier[type] ( identifier[context] )== identifier[dict]
keyword[assert] literal[string] keyword[in] identifier[context]
keyword[assert] identifier[type] ( identifier[context] [ literal[string] ])== identifier[int]
identifier[http_port] = identifier[self] . identifier[pick_port] ()
keyword[assert] identifier[http_port]
identifier[context] = identifier[context] . identifier[copy] ()
identifier[context] [ literal[string] ]= identifier[http_port]
keyword[if] literal[string] keyword[not] keyword[in] identifier[context] :
identifier[context] [ literal[string] ]= literal[string] . identifier[format] ( identifier[port] = identifier[http_port] )
keyword[if] literal[string] keyword[in] identifier[context] [ literal[string] ]:
identifier[context] [ literal[string] ]= identifier[context] [ literal[string] ]. identifier[format] ( identifier[port] = identifier[http_port] )
identifier[pid] = identifier[self] . identifier[get_pid] ( identifier[name] )
keyword[assert] literal[string] keyword[not] keyword[in] identifier[context]
identifier[comm] . identifier[set_context] ( identifier[pid] , identifier[context] )
keyword[if] identifier[fg] :
identifier[self] . identifier[exec_notebook_daemon_command] ( identifier[name] , literal[string] , identifier[port] = identifier[http_port] )
keyword[else] :
identifier[self] . identifier[exec_notebook_daemon_command] ( identifier[name] , literal[string] , identifier[port] = identifier[http_port] ) | def start_notebook(self, name, context: dict, fg=False):
"""Start new IPython Notebook daemon.
:param name: The owner of the Notebook will be *name*. He/she gets a new Notebook content folder created where all files are placed.
:param context: Extra context information passed to the started Notebook. This must contain {context_hash:int} parameter used to identify the launch parameters for the notebook
"""
assert context
assert type(context) == dict
assert 'context_hash' in context
assert type(context['context_hash']) == int
http_port = self.pick_port()
assert http_port
context = context.copy()
context['http_port'] = http_port
# We can't proxy websocket URLs, so let them go directly through localhost or have front end server to do proxying (nginx)
if 'websocket_url' not in context:
context['websocket_url'] = 'ws://localhost:{port}'.format(port=http_port) # depends on [control=['if'], data=['context']]
if '{port}' in context['websocket_url']:
# Do port substitution for the websocket URL
context['websocket_url'] = context['websocket_url'].format(port=http_port) # depends on [control=['if'], data=[]]
pid = self.get_pid(name)
assert 'terminated' not in context
comm.set_context(pid, context)
if fg:
self.exec_notebook_daemon_command(name, 'fg', port=http_port) # depends on [control=['if'], data=[]]
else:
self.exec_notebook_daemon_command(name, 'start', port=http_port) |
def _make_resource_result(f, futmap):
"""
Map per-resource results to per-resource futures in futmap.
The result value of each (successful) future is a ConfigResource.
"""
try:
result = f.result()
for resource, configs in result.items():
fut = futmap.get(resource, None)
if fut is None:
raise RuntimeError("Resource {} not found in future-map: {}".format(resource, futmap))
if resource.error is not None:
# Resource-level exception
fut.set_exception(KafkaException(resource.error))
else:
# Resource-level success
# configs will be a dict for describe_configs()
# and None for alter_configs()
fut.set_result(configs)
except Exception as e:
# Request-level exception, raise the same for all resources
for resource, fut in futmap.items():
fut.set_exception(e) | def function[_make_resource_result, parameter[f, futmap]]:
constant[
Map per-resource results to per-resource futures in futmap.
The result value of each (successful) future is a ConfigResource.
]
<ast.Try object at 0x7da18fe90a60> | keyword[def] identifier[_make_resource_result] ( identifier[f] , identifier[futmap] ):
literal[string]
keyword[try] :
identifier[result] = identifier[f] . identifier[result] ()
keyword[for] identifier[resource] , identifier[configs] keyword[in] identifier[result] . identifier[items] ():
identifier[fut] = identifier[futmap] . identifier[get] ( identifier[resource] , keyword[None] )
keyword[if] identifier[fut] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[resource] , identifier[futmap] ))
keyword[if] identifier[resource] . identifier[error] keyword[is] keyword[not] keyword[None] :
identifier[fut] . identifier[set_exception] ( identifier[KafkaException] ( identifier[resource] . identifier[error] ))
keyword[else] :
identifier[fut] . identifier[set_result] ( identifier[configs] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[for] identifier[resource] , identifier[fut] keyword[in] identifier[futmap] . identifier[items] ():
identifier[fut] . identifier[set_exception] ( identifier[e] ) | def _make_resource_result(f, futmap):
"""
Map per-resource results to per-resource futures in futmap.
The result value of each (successful) future is a ConfigResource.
"""
try:
result = f.result()
for (resource, configs) in result.items():
fut = futmap.get(resource, None)
if fut is None:
raise RuntimeError('Resource {} not found in future-map: {}'.format(resource, futmap)) # depends on [control=['if'], data=[]]
if resource.error is not None:
# Resource-level exception
fut.set_exception(KafkaException(resource.error)) # depends on [control=['if'], data=[]]
else:
# Resource-level success
# configs will be a dict for describe_configs()
# and None for alter_configs()
fut.set_result(configs) # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
# Request-level exception, raise the same for all resources
for (resource, fut) in futmap.items():
fut.set_exception(e) # depends on [control=['for'], data=[]] # depends on [control=['except'], data=['e']] |
def is_valid_element(element):
    """
    Check that the element can be manipulated by HaTeMiLe.
    :param element: The element
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :return: True if element can be manipulated or False if element cannot
    be manipulated.
    :rtype: bool
    """
    # An explicit data-ignore attribute opts the element out entirely.
    if element.has_attribute(CommonFunctions.DATA_IGNORE):
        return False
    parent = element.get_parent_element()
    if parent is None:
        return True
    # Stop walking up once the document root is reached; otherwise the
    # element is valid only if its whole ancestor chain is valid.
    if parent.get_tag_name() in ('BODY', 'HTML'):
        return True
    return CommonFunctions.is_valid_element(parent)
constant[
Check that the element can be manipulated by HaTeMiLe.
:param element: The element
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: True if element can be manipulated or False if element cannot
be manipulated.
:rtype: bool
]
if call[name[element].has_attribute, parameter[name[CommonFunctions].DATA_IGNORE]] begin[:]
return[constant[False]] | keyword[def] identifier[is_valid_element] ( identifier[element] ):
literal[string]
keyword[if] identifier[element] . identifier[has_attribute] ( identifier[CommonFunctions] . identifier[DATA_IGNORE] ):
keyword[return] keyword[False]
keyword[else] :
identifier[parent_element] = identifier[element] . identifier[get_parent_element] ()
keyword[if] identifier[parent_element] keyword[is] keyword[not] keyword[None] :
identifier[tag_name] = identifier[parent_element] . identifier[get_tag_name] ()
keyword[if] ( identifier[tag_name] != literal[string] ) keyword[and] ( identifier[tag_name] != literal[string] ):
keyword[return] identifier[CommonFunctions] . identifier[is_valid_element] ( identifier[parent_element] )
keyword[return] keyword[True]
keyword[return] keyword[True] | def is_valid_element(element):
"""
Check that the element can be manipulated by HaTeMiLe.
:param element: The element
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: True if element can be manipulated or False if element cannot
be manipulated.
:rtype: bool
"""
if element.has_attribute(CommonFunctions.DATA_IGNORE):
return False # depends on [control=['if'], data=[]]
else:
parent_element = element.get_parent_element()
if parent_element is not None:
tag_name = parent_element.get_tag_name()
if tag_name != 'BODY' and tag_name != 'HTML':
return CommonFunctions.is_valid_element(parent_element) # depends on [control=['if'], data=[]]
return True # depends on [control=['if'], data=['parent_element']]
return True |
def play():
    """Play a game of naughts and crosses against the computer."""
    # Each mark is driven by its own move-picking strategy.
    movers = {'X': player_move, 'O': random_move}
    board = Board()
    while not board.winner:
        col, row = movers[board.player](board)
        board = board.make_move(col, row)
        print(board, end='\n\n')
    print(board.winner)
constant[Play a game of naughts and crosses against the computer.]
variable[ai] assign[=] dictionary[[<ast.Constant object at 0x7da20c6ab9a0>, <ast.Constant object at 0x7da20c6ab070>], [<ast.Name object at 0x7da20c6a8730>, <ast.Name object at 0x7da20c6a81c0>]]
variable[board] assign[=] call[name[Board], parameter[]]
while <ast.UnaryOp object at 0x7da20c6aa5f0> begin[:]
<ast.Tuple object at 0x7da20c6a88e0> assign[=] call[call[name[ai]][name[board].player], parameter[name[board]]]
variable[board] assign[=] call[name[board].make_move, parameter[name[x], name[y]]]
call[name[print], parameter[name[board]]]
call[name[print], parameter[name[board].winner]] | keyword[def] identifier[play] ():
literal[string]
identifier[ai] ={ literal[string] : identifier[player_move] , literal[string] : identifier[random_move] }
identifier[board] = identifier[Board] ()
keyword[while] keyword[not] identifier[board] . identifier[winner] :
identifier[x] , identifier[y] = identifier[ai] [ identifier[board] . identifier[player] ]( identifier[board] )
identifier[board] = identifier[board] . identifier[make_move] ( identifier[x] , identifier[y] )
identifier[print] ( identifier[board] , identifier[end] = literal[string] )
identifier[print] ( identifier[board] . identifier[winner] ) | def play():
"""Play a game of naughts and crosses against the computer."""
ai = {'X': player_move, 'O': random_move}
board = Board()
while not board.winner:
(x, y) = ai[board.player](board)
board = board.make_move(x, y) # depends on [control=['while'], data=[]]
print(board, end='\n\n')
print(board.winner) |
def task(func):
    """Decorator to run the decorated function as a Task.

    Calling the returned wrapper does not execute ``func`` directly; it
    hands ``func`` and the call's arguments to ``spawn`` and returns
    whatever ``spawn`` returns (presumably a task/greenlet handle --
    confirm against the surrounding framework).
    """
    import functools

    # functools.wraps keeps func's __name__/__doc__/etc. on the wrapper,
    # so introspection and logging still see the original function.
    @functools.wraps(func)
    def task_wrapper(*args, **kwargs):
        return spawn(func, *args, **kwargs)
    return task_wrapper
constant[Decorator to run the decorated function as a Task
]
def function[task_wrapper, parameter[]]:
return[call[name[spawn], parameter[name[func], <ast.Starred object at 0x7da18f09c400>]]]
return[name[task_wrapper]] | keyword[def] identifier[task] ( identifier[func] ):
literal[string]
keyword[def] identifier[task_wrapper] (* identifier[args] ,** identifier[kwargs] ):
keyword[return] identifier[spawn] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[task_wrapper] | def task(func):
"""Decorator to run the decorated function as a Task
"""
def task_wrapper(*args, **kwargs):
return spawn(func, *args, **kwargs)
return task_wrapper |
def from_spec(cls, spec):
    """
    Load a Custodian instance where the jobs are specified from a
    structure and a spec dict. This allows simple
    custom job sequences to be constructed quickly via a YAML file.
    Args:
        spec (dict): A dict specifying job. A sample of the dict in
            YAML format for the usual MP workflow is given as follows
            ```
            jobs:
            - jb: custodian.vasp.jobs.VaspJob
              params:
                final: False
                suffix: .relax1
            - jb: custodian.vasp.jobs.VaspJob
              params:
                final: True
                suffix: .relax2
                settings_override: {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}
            jobs_common_params:
              vasp_cmd: /opt/vasp
            handlers:
            - hdlr: custodian.vasp.handlers.VaspErrorHandler
            - hdlr: custodian.vasp.handlers.AliasingErrorHandler
            - hdlr: custodian.vasp.handlers.MeshSymmetryHandler
            validators:
            - vldr: custodian.vasp.validators.VasprunXMLValidator
            custodian_params:
              scratch_dir: /tmp
            ```
            The `jobs` key is a list of jobs. Each job is
            specified via "job": <explicit path>, and all parameters are
            specified via `params` which is a dict.
            `common_params` specify a common set of parameters that are
            passed to all jobs, e.g., vasp_cmd.
    Returns:
        Custodian instance.
    """
    # Decoder used to rebuild serialized (MSONable) objects embedded in
    # the spec's parameter values.
    dec = MontyDecoder()
    def load_class(dotpath):
        """Import and return the class named by a dotted-path string."""
        modname, classname = dotpath.rsplit(".", 1)
        mod = __import__(modname, globals(), locals(), [classname], 0)
        return getattr(mod, classname)
    def process_params(d):
        """Decode a params dict; keys prefixed with "$" get environment
        variables expanded in their values (the "$" is stripped from the
        key in the returned dict)."""
        decoded = {}
        for k, v in d.items():
            if k.startswith("$"):
                if isinstance(v, list):
                    v = [os.path.expandvars(i) for i in v]
                elif isinstance(v, dict):
                    v = {k2: os.path.expandvars(v2) for k2, v2 in v.items()}
                else:
                    v = os.path.expandvars(v)
            decoded[k.strip("$")] = dec.process_decoded(v)
        return decoded
    jobs = []
    common_params = process_params(spec.get("jobs_common_params", {}))
    for d in spec["jobs"]:
        cls_ = load_class(d["jb"])
        params = process_params(d.get("params", {}))
        # NOTE(review): common params are applied after per-job params,
        # so they take precedence over per-job values -- confirm this
        # precedence is intended.
        params.update(common_params)
        jobs.append(cls_(**params))
    handlers = []
    for d in spec.get("handlers", []):
        cls_ = load_class(d["hdlr"])
        params = process_params(d.get("params", {}))
        handlers.append(cls_(**params))
    validators = []
    for d in spec.get("validators", []):
        cls_ = load_class(d["vldr"])
        params = process_params(d.get("params", {}))
        validators.append(cls_(**params))
    # Remaining top-level options are forwarded to the constructor itself.
    custodian_params = process_params(spec.get("custodian_params", {}))
    return cls(jobs=jobs, handlers=handlers, validators=validators,
               **custodian_params)
constant[
Load a Custodian instance where the jobs are specified from a
structure and a spec dict. This allows simple
custom job sequences to be constructed quickly via a YAML file.
Args:
spec (dict): A dict specifying job. A sample of the dict in
YAML format for the usual MP workflow is given as follows
```
jobs:
- jb: custodian.vasp.jobs.VaspJob
params:
final: False
suffix: .relax1
- jb: custodian.vasp.jobs.VaspJob
params:
final: True
suffix: .relax2
settings_override: {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}
jobs_common_params:
vasp_cmd: /opt/vasp
handlers:
- hdlr: custodian.vasp.handlers.VaspErrorHandler
- hdlr: custodian.vasp.handlers.AliasingErrorHandler
- hdlr: custodian.vasp.handlers.MeshSymmetryHandler
validators:
- vldr: custodian.vasp.validators.VasprunXMLValidator
custodian_params:
scratch_dir: /tmp
```
The `jobs` key is a list of jobs. Each job is
specified via "job": <explicit path>, and all parameters are
specified via `params` which is a dict.
`common_params` specify a common set of parameters that are
passed to all jobs, e.g., vasp_cmd.
Returns:
Custodian instance.
]
variable[dec] assign[=] call[name[MontyDecoder], parameter[]]
def function[load_class, parameter[dotpath]]:
<ast.Tuple object at 0x7da1b0575ae0> assign[=] call[name[dotpath].rsplit, parameter[constant[.], constant[1]]]
variable[mod] assign[=] call[name[__import__], parameter[name[modname], call[name[globals], parameter[]], call[name[locals], parameter[]], list[[<ast.Name object at 0x7da1b0575540>]], constant[0]]]
return[call[name[getattr], parameter[name[mod], name[classname]]]]
def function[process_params, parameter[d]]:
variable[decoded] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0576350>, <ast.Name object at 0x7da1b0576380>]]] in starred[call[name[d].items, parameter[]]] begin[:]
if call[name[k].startswith, parameter[constant[$]]] begin[:]
if call[name[isinstance], parameter[name[v], name[list]]] begin[:]
variable[v] assign[=] <ast.ListComp object at 0x7da1b0576260>
call[name[decoded]][call[name[k].strip, parameter[constant[$]]]] assign[=] call[name[dec].process_decoded, parameter[name[v]]]
return[name[decoded]]
variable[jobs] assign[=] list[[]]
variable[common_params] assign[=] call[name[process_params], parameter[call[name[spec].get, parameter[constant[jobs_common_params], dictionary[[], []]]]]]
for taget[name[d]] in starred[call[name[spec]][constant[jobs]]] begin[:]
variable[cls_] assign[=] call[name[load_class], parameter[call[name[d]][constant[jb]]]]
variable[params] assign[=] call[name[process_params], parameter[call[name[d].get, parameter[constant[params], dictionary[[], []]]]]]
call[name[params].update, parameter[name[common_params]]]
call[name[jobs].append, parameter[call[name[cls_], parameter[]]]]
variable[handlers] assign[=] list[[]]
for taget[name[d]] in starred[call[name[spec].get, parameter[constant[handlers], list[[]]]]] begin[:]
variable[cls_] assign[=] call[name[load_class], parameter[call[name[d]][constant[hdlr]]]]
variable[params] assign[=] call[name[process_params], parameter[call[name[d].get, parameter[constant[params], dictionary[[], []]]]]]
call[name[handlers].append, parameter[call[name[cls_], parameter[]]]]
variable[validators] assign[=] list[[]]
for taget[name[d]] in starred[call[name[spec].get, parameter[constant[validators], list[[]]]]] begin[:]
variable[cls_] assign[=] call[name[load_class], parameter[call[name[d]][constant[vldr]]]]
variable[params] assign[=] call[name[process_params], parameter[call[name[d].get, parameter[constant[params], dictionary[[], []]]]]]
call[name[validators].append, parameter[call[name[cls_], parameter[]]]]
variable[custodian_params] assign[=] call[name[process_params], parameter[call[name[spec].get, parameter[constant[custodian_params], dictionary[[], []]]]]]
return[call[name[cls], parameter[]]] | keyword[def] identifier[from_spec] ( identifier[cls] , identifier[spec] ):
literal[string]
identifier[dec] = identifier[MontyDecoder] ()
keyword[def] identifier[load_class] ( identifier[dotpath] ):
identifier[modname] , identifier[classname] = identifier[dotpath] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[mod] = identifier[__import__] ( identifier[modname] , identifier[globals] (), identifier[locals] (),[ identifier[classname] ], literal[int] )
keyword[return] identifier[getattr] ( identifier[mod] , identifier[classname] )
keyword[def] identifier[process_params] ( identifier[d] ):
identifier[decoded] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d] . identifier[items] ():
keyword[if] identifier[k] . identifier[startswith] ( literal[string] ):
keyword[if] identifier[isinstance] ( identifier[v] , identifier[list] ):
identifier[v] =[ identifier[os] . identifier[path] . identifier[expandvars] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[v] ]
keyword[elif] identifier[isinstance] ( identifier[v] , identifier[dict] ):
identifier[v] ={ identifier[k2] : identifier[os] . identifier[path] . identifier[expandvars] ( identifier[v2] ) keyword[for] identifier[k2] , identifier[v2] keyword[in] identifier[v] . identifier[items] ()}
keyword[else] :
identifier[v] = identifier[os] . identifier[path] . identifier[expandvars] ( identifier[v] )
identifier[decoded] [ identifier[k] . identifier[strip] ( literal[string] )]= identifier[dec] . identifier[process_decoded] ( identifier[v] )
keyword[return] identifier[decoded]
identifier[jobs] =[]
identifier[common_params] = identifier[process_params] ( identifier[spec] . identifier[get] ( literal[string] ,{}))
keyword[for] identifier[d] keyword[in] identifier[spec] [ literal[string] ]:
identifier[cls_] = identifier[load_class] ( identifier[d] [ literal[string] ])
identifier[params] = identifier[process_params] ( identifier[d] . identifier[get] ( literal[string] ,{}))
identifier[params] . identifier[update] ( identifier[common_params] )
identifier[jobs] . identifier[append] ( identifier[cls_] (** identifier[params] ))
identifier[handlers] =[]
keyword[for] identifier[d] keyword[in] identifier[spec] . identifier[get] ( literal[string] ,[]):
identifier[cls_] = identifier[load_class] ( identifier[d] [ literal[string] ])
identifier[params] = identifier[process_params] ( identifier[d] . identifier[get] ( literal[string] ,{}))
identifier[handlers] . identifier[append] ( identifier[cls_] (** identifier[params] ))
identifier[validators] =[]
keyword[for] identifier[d] keyword[in] identifier[spec] . identifier[get] ( literal[string] ,[]):
identifier[cls_] = identifier[load_class] ( identifier[d] [ literal[string] ])
identifier[params] = identifier[process_params] ( identifier[d] . identifier[get] ( literal[string] ,{}))
identifier[validators] . identifier[append] ( identifier[cls_] (** identifier[params] ))
identifier[custodian_params] = identifier[process_params] ( identifier[spec] . identifier[get] ( literal[string] ,{}))
keyword[return] identifier[cls] ( identifier[jobs] = identifier[jobs] , identifier[handlers] = identifier[handlers] , identifier[validators] = identifier[validators] ,
** identifier[custodian_params] ) | def from_spec(cls, spec):
"""
Load a Custodian instance where the jobs are specified from a
structure and a spec dict. This allows simple
custom job sequences to be constructed quickly via a YAML file.
Args:
spec (dict): A dict specifying job. A sample of the dict in
YAML format for the usual MP workflow is given as follows
```
jobs:
- jb: custodian.vasp.jobs.VaspJob
params:
final: False
suffix: .relax1
- jb: custodian.vasp.jobs.VaspJob
params:
final: True
suffix: .relax2
settings_override: {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}
jobs_common_params:
vasp_cmd: /opt/vasp
handlers:
- hdlr: custodian.vasp.handlers.VaspErrorHandler
- hdlr: custodian.vasp.handlers.AliasingErrorHandler
- hdlr: custodian.vasp.handlers.MeshSymmetryHandler
validators:
- vldr: custodian.vasp.validators.VasprunXMLValidator
custodian_params:
scratch_dir: /tmp
```
The `jobs` key is a list of jobs. Each job is
specified via "job": <explicit path>, and all parameters are
specified via `params` which is a dict.
`common_params` specify a common set of parameters that are
passed to all jobs, e.g., vasp_cmd.
Returns:
Custodian instance.
"""
dec = MontyDecoder()
def load_class(dotpath):
(modname, classname) = dotpath.rsplit('.', 1)
mod = __import__(modname, globals(), locals(), [classname], 0)
return getattr(mod, classname)
def process_params(d):
decoded = {}
for (k, v) in d.items():
if k.startswith('$'):
if isinstance(v, list):
v = [os.path.expandvars(i) for i in v] # depends on [control=['if'], data=[]]
elif isinstance(v, dict):
v = {k2: os.path.expandvars(v2) for (k2, v2) in v.items()} # depends on [control=['if'], data=[]]
else:
v = os.path.expandvars(v) # depends on [control=['if'], data=[]]
decoded[k.strip('$')] = dec.process_decoded(v) # depends on [control=['for'], data=[]]
return decoded
jobs = []
common_params = process_params(spec.get('jobs_common_params', {}))
for d in spec['jobs']:
cls_ = load_class(d['jb'])
params = process_params(d.get('params', {}))
params.update(common_params)
jobs.append(cls_(**params)) # depends on [control=['for'], data=['d']]
handlers = []
for d in spec.get('handlers', []):
cls_ = load_class(d['hdlr'])
params = process_params(d.get('params', {}))
handlers.append(cls_(**params)) # depends on [control=['for'], data=['d']]
validators = []
for d in spec.get('validators', []):
cls_ = load_class(d['vldr'])
params = process_params(d.get('params', {}))
validators.append(cls_(**params)) # depends on [control=['for'], data=['d']]
custodian_params = process_params(spec.get('custodian_params', {}))
return cls(jobs=jobs, handlers=handlers, validators=validators, **custodian_params) |
def fix_keys(self):
    """
    Remove the _ from the keys of the results
    """
    if not self.valid:
        return
    for hit in self._results['hits']['hits']:
        # Snapshot the underscore-prefixed keys first so the dict can be
        # mutated safely while the keys are renamed.
        prefixed = [key for key in hit if key.startswith("_")]
        for key in prefixed:
            hit[key[1:]] = hit.pop(key)
constant[
Remove the _ from the keys of the results
]
if <ast.UnaryOp object at 0x7da1b0e6c0a0> begin[:]
return[None]
for taget[name[hit]] in starred[call[call[name[self]._results][constant[hits]]][constant[hits]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0e6d1b0>, <ast.Name object at 0x7da1b0e6e6b0>]]] in starred[call[name[list], parameter[call[name[hit].items, parameter[]]]]] begin[:]
if call[name[key].startswith, parameter[constant[_]]] begin[:]
call[name[hit]][call[name[key]][<ast.Slice object at 0x7da1b0e6e290>]] assign[=] name[item]
<ast.Delete object at 0x7da1b0e6e470> | keyword[def] identifier[fix_keys] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[valid] :
keyword[return]
keyword[for] identifier[hit] keyword[in] identifier[self] . identifier[_results] [ literal[string] ][ literal[string] ]:
keyword[for] identifier[key] , identifier[item] keyword[in] identifier[list] ( identifier[hit] . identifier[items] ()):
keyword[if] identifier[key] . identifier[startswith] ( literal[string] ):
identifier[hit] [ identifier[key] [ literal[int] :]]= identifier[item]
keyword[del] identifier[hit] [ identifier[key] ] | def fix_keys(self):
"""
Remove the _ from the keys of the results
"""
if not self.valid:
return # depends on [control=['if'], data=[]]
for hit in self._results['hits']['hits']:
for (key, item) in list(hit.items()):
if key.startswith('_'):
hit[key[1:]] = item
del hit[key] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['hit']] |
def _fix_up_properties(cls):
    """Fix up the properties by calling their _fix_up() method.
    Note: This is called by MetaModel, but may also be called manually
    after dynamically updating a model class.
    """
    # Verify that _get_kind() returns an 8-bit string.
    # (Python 2 code: basestring covers both str and unicode.)
    kind = cls._get_kind()
    if not isinstance(kind, basestring):
        raise KindError('Class %s defines a _get_kind() method that returns '
                        'a non-string (%r)' % (cls.__name__, kind))
    if not isinstance(kind, str):
        try:
            kind = kind.encode('ascii')  # ASCII contents is okay.
        except UnicodeEncodeError:
            # Non-ASCII unicode kinds are rejected rather than silently
            # re-encoded.
            raise KindError('Class %s defines a _get_kind() method that returns '
                            'a Unicode string (%r); please encode using utf-8' %
                            (cls.__name__, kind))
    cls._properties = {}  # Map of {name: Property}
    if cls.__module__ == __name__:  # Skip the classes in *this* file.
        return
    # Scan all attributes (inherited ones included via dir()) and register
    # every ModelAttribute, except the special ModelKey.
    for name in set(dir(cls)):
        attr = getattr(cls, name, None)
        if isinstance(attr, ModelAttribute) and not isinstance(attr, ModelKey):
            if name.startswith('_'):
                raise TypeError('ModelAttribute %s cannot begin with an underscore '
                                'character. _ prefixed attributes are reserved for '
                                'temporary Model instance values.' % name)
            # Let the attribute bind itself to this class under this name.
            attr._fix_up(cls, name)
            if isinstance(attr, Property):
                # Mark the model as repeated if the property itself repeats,
                # or if a structured property's sub-model has repeats.
                if (attr._repeated or
                        (isinstance(attr, StructuredProperty) and
                         attr._modelclass._has_repeated)):
                    cls._has_repeated = True
                cls._properties[attr._name] = attr
    # Register this class in the global kind -> class map.
    cls._update_kind_map()
constant[Fix up the properties by calling their _fix_up() method.
Note: This is called by MetaModel, but may also be called manually
after dynamically updating a model class.
]
variable[kind] assign[=] call[name[cls]._get_kind, parameter[]]
if <ast.UnaryOp object at 0x7da18ede7910> begin[:]
<ast.Raise object at 0x7da18ede5840>
if <ast.UnaryOp object at 0x7da18ede59c0> begin[:]
<ast.Try object at 0x7da18ede63e0>
name[cls]._properties assign[=] dictionary[[], []]
if compare[name[cls].__module__ equal[==] name[__name__]] begin[:]
return[None]
for taget[name[name]] in starred[call[name[set], parameter[call[name[dir], parameter[name[cls]]]]]] begin[:]
variable[attr] assign[=] call[name[getattr], parameter[name[cls], name[name], constant[None]]]
if <ast.BoolOp object at 0x7da18ede67a0> begin[:]
if call[name[name].startswith, parameter[constant[_]]] begin[:]
<ast.Raise object at 0x7da18ede54e0>
call[name[attr]._fix_up, parameter[name[cls], name[name]]]
if call[name[isinstance], parameter[name[attr], name[Property]]] begin[:]
if <ast.BoolOp object at 0x7da18ede5f00> begin[:]
name[cls]._has_repeated assign[=] constant[True]
call[name[cls]._properties][name[attr]._name] assign[=] name[attr]
call[name[cls]._update_kind_map, parameter[]] | keyword[def] identifier[_fix_up_properties] ( identifier[cls] ):
literal[string]
identifier[kind] = identifier[cls] . identifier[_get_kind] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[kind] , identifier[basestring] ):
keyword[raise] identifier[KindError] ( literal[string]
literal[string] %( identifier[cls] . identifier[__name__] , identifier[kind] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[kind] , identifier[str] ):
keyword[try] :
identifier[kind] = identifier[kind] . identifier[encode] ( literal[string] )
keyword[except] identifier[UnicodeEncodeError] :
keyword[raise] identifier[KindError] ( literal[string]
literal[string] %
( identifier[cls] . identifier[__name__] , identifier[kind] ))
identifier[cls] . identifier[_properties] ={}
keyword[if] identifier[cls] . identifier[__module__] == identifier[__name__] :
keyword[return]
keyword[for] identifier[name] keyword[in] identifier[set] ( identifier[dir] ( identifier[cls] )):
identifier[attr] = identifier[getattr] ( identifier[cls] , identifier[name] , keyword[None] )
keyword[if] identifier[isinstance] ( identifier[attr] , identifier[ModelAttribute] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[attr] , identifier[ModelKey] ):
keyword[if] identifier[name] . identifier[startswith] ( literal[string] ):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string]
literal[string] % identifier[name] )
identifier[attr] . identifier[_fix_up] ( identifier[cls] , identifier[name] )
keyword[if] identifier[isinstance] ( identifier[attr] , identifier[Property] ):
keyword[if] ( identifier[attr] . identifier[_repeated] keyword[or]
( identifier[isinstance] ( identifier[attr] , identifier[StructuredProperty] ) keyword[and]
identifier[attr] . identifier[_modelclass] . identifier[_has_repeated] )):
identifier[cls] . identifier[_has_repeated] = keyword[True]
identifier[cls] . identifier[_properties] [ identifier[attr] . identifier[_name] ]= identifier[attr]
identifier[cls] . identifier[_update_kind_map] () | def _fix_up_properties(cls):
"""Fix up the properties by calling their _fix_up() method.
Note: This is called by MetaModel, but may also be called manually
after dynamically updating a model class.
"""
# Verify that _get_kind() returns an 8-bit string.
kind = cls._get_kind()
if not isinstance(kind, basestring):
raise KindError('Class %s defines a _get_kind() method that returns a non-string (%r)' % (cls.__name__, kind)) # depends on [control=['if'], data=[]]
if not isinstance(kind, str):
try:
kind = kind.encode('ascii') # ASCII contents is okay. # depends on [control=['try'], data=[]]
except UnicodeEncodeError:
raise KindError('Class %s defines a _get_kind() method that returns a Unicode string (%r); please encode using utf-8' % (cls.__name__, kind)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
cls._properties = {} # Map of {name: Property}
if cls.__module__ == __name__: # Skip the classes in *this* file.
return # depends on [control=['if'], data=[]]
for name in set(dir(cls)):
attr = getattr(cls, name, None)
if isinstance(attr, ModelAttribute) and (not isinstance(attr, ModelKey)):
if name.startswith('_'):
raise TypeError('ModelAttribute %s cannot begin with an underscore character. _ prefixed attributes are reserved for temporary Model instance values.' % name) # depends on [control=['if'], data=[]]
attr._fix_up(cls, name)
if isinstance(attr, Property):
if attr._repeated or (isinstance(attr, StructuredProperty) and attr._modelclass._has_repeated):
cls._has_repeated = True # depends on [control=['if'], data=[]]
cls._properties[attr._name] = attr # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']]
cls._update_kind_map() |
def HernquistX(s):
    """
    Computes X function from equations (33) & (34) of Hernquist (1990)
    """
    if s < 0.:
        raise ValueError("s must be positive in Hernquist X function")
    if s == 1.:
        # The two branches below both tend to 1 as s -> 1.
        return 1.
    if s < 1.:
        root = numpy.sqrt(1 - s * s)
        return numpy.log((1 + root) / s) / root
    root = numpy.sqrt(s * s - 1)
    return numpy.arccos(1. / s) / root
constant[
Computes X function from equations (33) & (34) of Hernquist (1990)
]
if compare[name[s] less[<] constant[0.0]] begin[:]
<ast.Raise object at 0x7da2044c3280> | keyword[def] identifier[HernquistX] ( identifier[s] ):
literal[string]
keyword[if] ( identifier[s] < literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[elif] ( identifier[s] < literal[int] ):
keyword[return] identifier[numpy] . identifier[log] (( literal[int] + identifier[numpy] . identifier[sqrt] ( literal[int] - identifier[s] * identifier[s] ))/ identifier[s] )/ identifier[numpy] . identifier[sqrt] ( literal[int] - identifier[s] * identifier[s] )
keyword[elif] ( identifier[s] == literal[int] ):
keyword[return] literal[int]
keyword[else] :
keyword[return] identifier[numpy] . identifier[arccos] ( literal[int] / identifier[s] )/ identifier[numpy] . identifier[sqrt] ( identifier[s] * identifier[s] - literal[int] ) | def HernquistX(s):
"""
Computes X function from equations (33) & (34) of Hernquist (1990)
"""
if s < 0.0:
raise ValueError('s must be positive in Hernquist X function') # depends on [control=['if'], data=[]]
elif s < 1.0:
return numpy.log((1 + numpy.sqrt(1 - s * s)) / s) / numpy.sqrt(1 - s * s) # depends on [control=['if'], data=['s']]
elif s == 1.0:
return 1.0 # depends on [control=['if'], data=[]]
else:
return numpy.arccos(1.0 / s) / numpy.sqrt(s * s - 1) |
def tarbell_publish(command, args):
    """
    Publish the generated site to S3.

    ``args[0]`` selects a bucket configuration from ``tarbell_config.py``
    (defaults to ``"staging"``). The site is rendered into a temporary
    directory, synced to the bucket, and the project's ``publish`` hook is
    fired. The temporary directory is always removed, even on failure.
    """
    with ensure_settings(command, args) as settings, ensure_project(command, args) as site:
        # First positional argument names the bucket config; default "staging".
        bucket_name = list_get(args, 0, "staging")
        try:
            bucket_url = S3Url(site.project.S3_BUCKETS[bucket_name])
        except KeyError:
            show_error(
                "\nThere's no bucket configuration called '{0}' in "
                "tarbell_config.py.".format(colored.yellow(bucket_name)))
            sys.exit(1)
        # Context injected into templates so generated pages reference S3.
        extra_context = {
            "ROOT_URL": bucket_url,
            "S3_BUCKET": bucket_url.root,
            "BUCKET_NAME": bucket_name,
        }
        tempdir = "{0}/".format(tarbell_generate(command,
            args, extra_context=extra_context, skip_args=True, quiet=True))
        try:
            title = site.project.DEFAULT_CONTEXT.get("title", "")
            puts("\nDeploying {0} to {1} ({2})\n".format(
                colored.yellow(title),
                colored.red(bucket_name),
                colored.green(bucket_url)
            ))
            # Get creds
            if settings.config:
                # If settings has a config section, use it
                kwargs = settings.config['s3_credentials'].get(bucket_url.root)
                if not kwargs:
                    # No per-bucket entry: fall back to the default key pair.
                    kwargs = {
                        'access_key_id': settings.config.get('default_s3_access_key_id'),
                        'secret_access_key': settings.config.get('default_s3_secret_access_key'),
                    }
                    puts("Using default bucket credentials")
                else:
                    puts("Using custom bucket configuration for {0}".format(bucket_url.root))
            else:
                # If no configuration exists, read from environment variables if possible
                # (a missing variable raises KeyError here).
                puts("Attemping to use AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY")
                kwargs = {
                    'access_key_id': os.environ["AWS_ACCESS_KEY_ID"],
                    'secret_access_key': os.environ["AWS_SECRET_ACCESS_KEY"],
                }
            if not kwargs.get('access_key_id') and not kwargs.get('secret_access_key'):
                # NOTE(review): exits with status 0 here, unlike the exit(1)
                # above for a bad bucket name -- confirm that is intentional.
                show_error('S3 access is not configured. Set up S3 with {0} to publish.'
                           .format(colored.green('tarbell configure')))
                sys.exit()
            s3 = S3Sync(tempdir, bucket_url, **kwargs)
            s3.deploy_to_s3()
            site.call_hook("publish", site, s3)
            puts("\nIf you have website hosting enabled, you can see your project at:")
            puts(colored.green("http://{0}\n".format(bucket_url)))
        except KeyboardInterrupt:
            show_error("ctrl-c pressed, bailing out!")
        finally:
            # Always clean up the rendered temp directory.
            _delete_dir(tempdir)
constant[
Publish to s3.
]
with call[name[ensure_settings], parameter[name[command], name[args]]] begin[:]
variable[bucket_name] assign[=] call[name[list_get], parameter[name[args], constant[0], constant[staging]]]
<ast.Try object at 0x7da1b1988ca0>
variable[extra_context] assign[=] dictionary[[<ast.Constant object at 0x7da1b1989c00>, <ast.Constant object at 0x7da1b1988c10>, <ast.Constant object at 0x7da1b198b610>], [<ast.Name object at 0x7da1b1989cc0>, <ast.Attribute object at 0x7da1b198bd90>, <ast.Name object at 0x7da1b198afe0>]]
variable[tempdir] assign[=] call[constant[{0}/].format, parameter[call[name[tarbell_generate], parameter[name[command], name[args]]]]]
<ast.Try object at 0x7da1b1989a50> | keyword[def] identifier[tarbell_publish] ( identifier[command] , identifier[args] ):
literal[string]
keyword[with] identifier[ensure_settings] ( identifier[command] , identifier[args] ) keyword[as] identifier[settings] , identifier[ensure_project] ( identifier[command] , identifier[args] ) keyword[as] identifier[site] :
identifier[bucket_name] = identifier[list_get] ( identifier[args] , literal[int] , literal[string] )
keyword[try] :
identifier[bucket_url] = identifier[S3Url] ( identifier[site] . identifier[project] . identifier[S3_BUCKETS] [ identifier[bucket_name] ])
keyword[except] identifier[KeyError] :
identifier[show_error] (
literal[string]
literal[string] . identifier[format] ( identifier[colored] . identifier[yellow] ( identifier[bucket_name] )))
identifier[sys] . identifier[exit] ( literal[int] )
identifier[extra_context] ={
literal[string] : identifier[bucket_url] ,
literal[string] : identifier[bucket_url] . identifier[root] ,
literal[string] : identifier[bucket_name] ,
}
identifier[tempdir] = literal[string] . identifier[format] ( identifier[tarbell_generate] ( identifier[command] ,
identifier[args] , identifier[extra_context] = identifier[extra_context] , identifier[skip_args] = keyword[True] , identifier[quiet] = keyword[True] ))
keyword[try] :
identifier[title] = identifier[site] . identifier[project] . identifier[DEFAULT_CONTEXT] . identifier[get] ( literal[string] , literal[string] )
identifier[puts] ( literal[string] . identifier[format] (
identifier[colored] . identifier[yellow] ( identifier[title] ),
identifier[colored] . identifier[red] ( identifier[bucket_name] ),
identifier[colored] . identifier[green] ( identifier[bucket_url] )
))
keyword[if] identifier[settings] . identifier[config] :
identifier[kwargs] = identifier[settings] . identifier[config] [ literal[string] ]. identifier[get] ( identifier[bucket_url] . identifier[root] )
keyword[if] keyword[not] identifier[kwargs] :
identifier[kwargs] ={
literal[string] : identifier[settings] . identifier[config] . identifier[get] ( literal[string] ),
literal[string] : identifier[settings] . identifier[config] . identifier[get] ( literal[string] ),
}
identifier[puts] ( literal[string] )
keyword[else] :
identifier[puts] ( literal[string] . identifier[format] ( identifier[bucket_url] . identifier[root] ))
keyword[else] :
identifier[puts] ( literal[string] )
identifier[kwargs] ={
literal[string] : identifier[os] . identifier[environ] [ literal[string] ],
literal[string] : identifier[os] . identifier[environ] [ literal[string] ],
}
keyword[if] keyword[not] identifier[kwargs] . identifier[get] ( literal[string] ) keyword[and] keyword[not] identifier[kwargs] . identifier[get] ( literal[string] ):
identifier[show_error] ( literal[string]
. identifier[format] ( identifier[colored] . identifier[green] ( literal[string] )))
identifier[sys] . identifier[exit] ()
identifier[s3] = identifier[S3Sync] ( identifier[tempdir] , identifier[bucket_url] ,** identifier[kwargs] )
identifier[s3] . identifier[deploy_to_s3] ()
identifier[site] . identifier[call_hook] ( literal[string] , identifier[site] , identifier[s3] )
identifier[puts] ( literal[string] )
identifier[puts] ( identifier[colored] . identifier[green] ( literal[string] . identifier[format] ( identifier[bucket_url] )))
keyword[except] identifier[KeyboardInterrupt] :
identifier[show_error] ( literal[string] )
keyword[finally] :
identifier[_delete_dir] ( identifier[tempdir] ) | def tarbell_publish(command, args):
"""
Publish to s3.
"""
with ensure_settings(command, args) as settings, ensure_project(command, args) as site:
bucket_name = list_get(args, 0, 'staging')
try:
bucket_url = S3Url(site.project.S3_BUCKETS[bucket_name]) # depends on [control=['try'], data=[]]
except KeyError:
show_error("\nThere's no bucket configuration called '{0}' in tarbell_config.py.".format(colored.yellow(bucket_name)))
sys.exit(1) # depends on [control=['except'], data=[]]
extra_context = {'ROOT_URL': bucket_url, 'S3_BUCKET': bucket_url.root, 'BUCKET_NAME': bucket_name}
tempdir = '{0}/'.format(tarbell_generate(command, args, extra_context=extra_context, skip_args=True, quiet=True))
try:
title = site.project.DEFAULT_CONTEXT.get('title', '')
puts('\nDeploying {0} to {1} ({2})\n'.format(colored.yellow(title), colored.red(bucket_name), colored.green(bucket_url)))
# Get creds
if settings.config:
# If settings has a config section, use it
kwargs = settings.config['s3_credentials'].get(bucket_url.root)
if not kwargs:
kwargs = {'access_key_id': settings.config.get('default_s3_access_key_id'), 'secret_access_key': settings.config.get('default_s3_secret_access_key')}
puts('Using default bucket credentials') # depends on [control=['if'], data=[]]
else:
puts('Using custom bucket configuration for {0}'.format(bucket_url.root)) # depends on [control=['if'], data=[]]
else:
# If no configuration exists, read from environment variables if possible
puts('Attemping to use AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY')
kwargs = {'access_key_id': os.environ['AWS_ACCESS_KEY_ID'], 'secret_access_key': os.environ['AWS_SECRET_ACCESS_KEY']}
if not kwargs.get('access_key_id') and (not kwargs.get('secret_access_key')):
show_error('S3 access is not configured. Set up S3 with {0} to publish.'.format(colored.green('tarbell configure')))
sys.exit() # depends on [control=['if'], data=[]]
s3 = S3Sync(tempdir, bucket_url, **kwargs)
s3.deploy_to_s3()
site.call_hook('publish', site, s3)
puts('\nIf you have website hosting enabled, you can see your project at:')
puts(colored.green('http://{0}\n'.format(bucket_url))) # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
show_error('ctrl-c pressed, bailing out!') # depends on [control=['except'], data=[]]
finally:
_delete_dir(tempdir) # depends on [control=['with'], data=['settings']] |
def show_vcs_output_vcs_nodes_vcs_node_info_node_public_ip_addresses_node_public_ip_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_vcs = ET.Element("show_vcs")
config = show_vcs
output = ET.SubElement(show_vcs, "output")
vcs_nodes = ET.SubElement(output, "vcs-nodes")
vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
node_public_ip_addresses = ET.SubElement(vcs_node_info, "node-public-ip-addresses")
node_public_ip_address = ET.SubElement(node_public_ip_addresses, "node-public-ip-address")
node_public_ip_address.text = kwargs.pop('node_public_ip_address')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[show_vcs_output_vcs_nodes_vcs_node_info_node_public_ip_addresses_node_public_ip_address, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[show_vcs] assign[=] call[name[ET].Element, parameter[constant[show_vcs]]]
variable[config] assign[=] name[show_vcs]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[show_vcs], constant[output]]]
variable[vcs_nodes] assign[=] call[name[ET].SubElement, parameter[name[output], constant[vcs-nodes]]]
variable[vcs_node_info] assign[=] call[name[ET].SubElement, parameter[name[vcs_nodes], constant[vcs-node-info]]]
variable[node_public_ip_addresses] assign[=] call[name[ET].SubElement, parameter[name[vcs_node_info], constant[node-public-ip-addresses]]]
variable[node_public_ip_address] assign[=] call[name[ET].SubElement, parameter[name[node_public_ip_addresses], constant[node-public-ip-address]]]
name[node_public_ip_address].text assign[=] call[name[kwargs].pop, parameter[constant[node_public_ip_address]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[show_vcs_output_vcs_nodes_vcs_node_info_node_public_ip_addresses_node_public_ip_address] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[show_vcs] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[show_vcs]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[show_vcs] , literal[string] )
identifier[vcs_nodes] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[vcs_node_info] = identifier[ET] . identifier[SubElement] ( identifier[vcs_nodes] , literal[string] )
identifier[node_public_ip_addresses] = identifier[ET] . identifier[SubElement] ( identifier[vcs_node_info] , literal[string] )
identifier[node_public_ip_address] = identifier[ET] . identifier[SubElement] ( identifier[node_public_ip_addresses] , literal[string] )
identifier[node_public_ip_address] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def show_vcs_output_vcs_nodes_vcs_node_info_node_public_ip_addresses_node_public_ip_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
show_vcs = ET.Element('show_vcs')
config = show_vcs
output = ET.SubElement(show_vcs, 'output')
vcs_nodes = ET.SubElement(output, 'vcs-nodes')
vcs_node_info = ET.SubElement(vcs_nodes, 'vcs-node-info')
node_public_ip_addresses = ET.SubElement(vcs_node_info, 'node-public-ip-addresses')
node_public_ip_address = ET.SubElement(node_public_ip_addresses, 'node-public-ip-address')
node_public_ip_address.text = kwargs.pop('node_public_ip_address')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def getSVD(data, k, getComponents=False, getS=False, normalization='mean'):
    """ Wrapper for computeSVD that will normalize and handle a Thunder Images object

    :param data: Thunder Images object
    :param k: number of components to keep
    :param getComponents: will return the components if true, otherwise will return None
    :param getS: will return the singular values if true, otherwise will return None
    :param normalization: one of 'mean', 'nanmean', 'zscore', or None
    :returns: projections, components, s
    """
    # Flatten each image and apply the requested per-image normalization.
    if normalization == 'nanmean':
        data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten() - np.nanmean(x)))
    elif normalization == 'mean':
        data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten() - x.mean()))
    elif normalization == 'zscore':
        # BUG FIX: was `normalization is 'zscore'` -- identity comparison of
        # string literals is implementation-dependent and can silently fail.
        data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(zscore(x.flatten())))
    elif normalization is None:
        data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten()))
    else:
        raise ValueError('Normalization should be one of: mean, nanmean, zscore, None. Got: %s' % normalization)
    mat = RowMatrix(data2)
    # Cache and force evaluation before the distributed SVD.
    mat.rows.cache()
    mat.rows.count()
    svd = compute_svd(row_matrix=mat, k=k, compute_u=False)
    if getComponents:
        # V is (pixels, k); reorder to (k, pixels) then restore image shape.
        components = svd.call("V").toArray()
        components = components.transpose(1, 0).reshape((k,) + data.shape[1:])
    else:
        components = None
    # Project the normalized rows onto the right singular vectors.
    projection = np.array(RowMatrix_new(data2).multiply(svd.call("V")).rows.collect())
    if getS:
        s = svd.call("s").toArray()
    else:
        s = None
    return projection, components, s
constant[ Wrapper for computeSVD that will normalize and handle a Thunder Images object
:param data: Thunder Images object
:param k: number of components to keep
:param getComponents: will return the components if true, otherwise will return None
:returns: projections, components, s
]
if compare[name[normalization] equal[==] constant[nanmean]] begin[:]
variable[data2] assign[=] call[call[call[call[name[data].tordd, parameter[]].sortByKey, parameter[]].values, parameter[]].map, parameter[<ast.Lambda object at 0x7da1b18b9330>]]
variable[mat] assign[=] call[name[RowMatrix], parameter[name[data2]]]
call[name[mat].rows.cache, parameter[]]
call[name[mat].rows.count, parameter[]]
variable[svd] assign[=] call[name[compute_svd], parameter[]]
if name[getComponents] begin[:]
variable[components] assign[=] call[call[name[svd].call, parameter[constant[V]]].toArray, parameter[]]
variable[components] assign[=] call[call[name[components].transpose, parameter[constant[1], constant[0]]].reshape, parameter[binary_operation[tuple[[<ast.Name object at 0x7da1b1814d60>]] + call[name[data].shape][<ast.Slice object at 0x7da1b1814a30>]]]]
variable[projection] assign[=] call[name[np].array, parameter[call[call[call[name[RowMatrix_new], parameter[name[data2]]].multiply, parameter[call[name[svd].call, parameter[constant[V]]]]].rows.collect, parameter[]]]]
if name[getS] begin[:]
variable[s] assign[=] call[call[name[svd].call, parameter[constant[s]]].toArray, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b18173d0>, <ast.Name object at 0x7da1b1817400>, <ast.Name object at 0x7da1b1817460>]]] | keyword[def] identifier[getSVD] ( identifier[data] , identifier[k] , identifier[getComponents] = keyword[False] , identifier[getS] = keyword[False] , identifier[normalization] = literal[string] ):
literal[string]
keyword[if] identifier[normalization] == literal[string] :
identifier[data2] = identifier[data] . identifier[tordd] (). identifier[sortByKey] (). identifier[values] (). identifier[map] ( keyword[lambda] identifier[x] : identifier[_convert_to_vector] ( identifier[x] . identifier[flatten] ()- identifier[np] . identifier[nanmean] ( identifier[x] )))
keyword[elif] identifier[normalization] == literal[string] :
identifier[data2] = identifier[data] . identifier[tordd] (). identifier[sortByKey] (). identifier[values] (). identifier[map] ( keyword[lambda] identifier[x] : identifier[_convert_to_vector] ( identifier[x] . identifier[flatten] ()- identifier[x] . identifier[mean] ()))
keyword[elif] identifier[normalization] keyword[is] literal[string] :
identifier[data2] = identifier[data] . identifier[tordd] (). identifier[sortByKey] (). identifier[values] (). identifier[map] ( keyword[lambda] identifier[x] : identifier[_convert_to_vector] ( identifier[zscore] ( identifier[x] . identifier[flatten] ())))
keyword[elif] identifier[normalization] keyword[is] keyword[None] :
identifier[data2] = identifier[data] . identifier[tordd] (). identifier[sortByKey] (). identifier[values] (). identifier[map] ( keyword[lambda] identifier[x] : identifier[_convert_to_vector] ( identifier[x] . identifier[flatten] ()))
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[normalization] )
identifier[mat] = identifier[RowMatrix] ( identifier[data2] )
identifier[mat] . identifier[rows] . identifier[cache] ()
identifier[mat] . identifier[rows] . identifier[count] ()
identifier[svd] = identifier[compute_svd] ( identifier[row_matrix] = identifier[mat] , identifier[k] = identifier[k] , identifier[compute_u] = keyword[False] )
keyword[if] identifier[getComponents] :
identifier[components] = identifier[svd] . identifier[call] ( literal[string] ). identifier[toArray] ()
identifier[components] = identifier[components] . identifier[transpose] ( literal[int] , literal[int] ). identifier[reshape] (( identifier[k] ,)+ identifier[data] . identifier[shape] [ literal[int] :])
keyword[else] :
identifier[components] = keyword[None]
identifier[projection] = identifier[np] . identifier[array] ( identifier[RowMatrix_new] ( identifier[data2] ). identifier[multiply] ( identifier[svd] . identifier[call] ( literal[string] )). identifier[rows] . identifier[collect] ())
keyword[if] identifier[getS] :
identifier[s] = identifier[svd] . identifier[call] ( literal[string] ). identifier[toArray] ()
keyword[else] :
identifier[s] = keyword[None]
keyword[return] identifier[projection] , identifier[components] , identifier[s] | def getSVD(data, k, getComponents=False, getS=False, normalization='mean'):
""" Wrapper for computeSVD that will normalize and handle a Thunder Images object
:param data: Thunder Images object
:param k: number of components to keep
:param getComponents: will return the components if true, otherwise will return None
:returns: projections, components, s
"""
if normalization == 'nanmean':
data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten() - np.nanmean(x))) # depends on [control=['if'], data=[]]
elif normalization == 'mean':
data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten() - x.mean())) # depends on [control=['if'], data=[]]
elif normalization is 'zscore':
data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(zscore(x.flatten()))) # depends on [control=['if'], data=[]]
elif normalization is None:
data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten())) # depends on [control=['if'], data=[]]
else:
raise ValueError('Normalization should be one of: mean, nanmean, zscore, None. Got: %s' % normalization)
mat = RowMatrix(data2)
mat.rows.cache()
mat.rows.count()
svd = compute_svd(row_matrix=mat, k=k, compute_u=False)
if getComponents:
components = svd.call('V').toArray()
components = components.transpose(1, 0).reshape((k,) + data.shape[1:]) # depends on [control=['if'], data=[]]
else:
components = None
projection = np.array(RowMatrix_new(data2).multiply(svd.call('V')).rows.collect())
if getS:
s = svd.call('s').toArray() # depends on [control=['if'], data=[]]
else:
s = None
return (projection, components, s) |
def find_region_end(self, lines):
    """Find the end of the region started with start and end markers

    Returns a tuple ``(end, next_start, explicit_end)``: two line indices
    into ``lines``, plus a flag that is True only when an explicit
    end-of-region marker was matched.
    """
    # Cell type may already be present in the parsed metadata; default
    # to a code cell otherwise.
    if self.metadata and 'cell_type' in self.metadata:
        self.cell_type = self.metadata.pop('cell_type')
    else:
        self.cell_type = 'code'
    # Tracks whether we are inside a (possibly multiline) string literal,
    # so that marker-like text inside quotes is ignored.
    parser = StringParser(self.language or self.default_language)
    for i, line in enumerate(lines):
        # skip cell header
        if self.metadata is not None and i == 0:
            continue
        if parser.is_quoted():
            parser.read_line(line)
            continue
        parser.read_line(line)
        # New code region
        # Simple code pattern in LightScripts must be preceded with a blank line
        if self.start_code_re.match(line) or (
                self.simple_start_code_re and self.simple_start_code_re.match(line) and
                (self.cell_marker_start or i == 0 or _BLANK_LINE.match(lines[i - 1]))):
            if self.explicit_end_marker_required:
                # Metadata here was conditioned on finding an explicit end marker
                # before the next start marker. So we dismiss it.
                self.metadata = None
                self.language = None
            # Trim up to two trailing blank lines before the next marker.
            if i > 0 and _BLANK_LINE.match(lines[i - 1]):
                if i > 1 and _BLANK_LINE.match(lines[i - 2]):
                    return i - 2, i, False
                return i - 1, i, False
            return i, i, False
        if not self.ignore_end_marker and self.end_code_re:
            if self.end_code_re.match(line):
                # Explicit end marker: region ends here and the next cell
                # starts on the line after the marker.
                return i, i + 1, True
        elif _BLANK_LINE.match(line):
            # No end-marker regex in use: a blank line ends the region,
            # unless the code that follows is indented (same block).
            if not next_code_is_indented(lines[i:]):
                if i > 0:
                    return i, i + 1, False
                if len(lines) > 1 and not _BLANK_LINE.match(lines[1]):
                    return 1, 1, False
                return 1, 2, False
    # No terminator found: the region extends to the end of the input.
    return len(lines), len(lines), False
constant[Find the end of the region started with start and end markers]
if <ast.BoolOp object at 0x7da1b1c34e80> begin[:]
name[self].cell_type assign[=] call[name[self].metadata.pop, parameter[constant[cell_type]]]
variable[parser] assign[=] call[name[StringParser], parameter[<ast.BoolOp object at 0x7da1b1c36e30>]]
for taget[tuple[[<ast.Name object at 0x7da1b1c36320>, <ast.Name object at 0x7da1b1c35ed0>]]] in starred[call[name[enumerate], parameter[name[lines]]]] begin[:]
if <ast.BoolOp object at 0x7da1b1c35330> begin[:]
continue
if call[name[parser].is_quoted, parameter[]] begin[:]
call[name[parser].read_line, parameter[name[line]]]
continue
call[name[parser].read_line, parameter[name[line]]]
if <ast.BoolOp object at 0x7da1b1c36950> begin[:]
if name[self].explicit_end_marker_required begin[:]
name[self].metadata assign[=] constant[None]
name[self].language assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b1c35a80> begin[:]
if <ast.BoolOp object at 0x7da1b1c36dd0> begin[:]
return[tuple[[<ast.BinOp object at 0x7da1b1c366e0>, <ast.Name object at 0x7da1b1c36e00>, <ast.Constant object at 0x7da1b1c36c80>]]]
return[tuple[[<ast.BinOp object at 0x7da1b1c35ab0>, <ast.Name object at 0x7da1b1c35180>, <ast.Constant object at 0x7da1b1c363b0>]]]
return[tuple[[<ast.Name object at 0x7da1b1c35510>, <ast.Name object at 0x7da1b1c36260>, <ast.Constant object at 0x7da1b1c35360>]]]
if <ast.BoolOp object at 0x7da1b1c8abc0> begin[:]
if call[name[self].end_code_re.match, parameter[name[line]]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b1c89ab0>, <ast.BinOp object at 0x7da1b1c89810>, <ast.Constant object at 0x7da1b1c88880>]]]
return[tuple[[<ast.Call object at 0x7da1b1c8a680>, <ast.Call object at 0x7da1b1c8bb80>, <ast.Constant object at 0x7da1b1c8aaa0>]]] | keyword[def] identifier[find_region_end] ( identifier[self] , identifier[lines] ):
literal[string]
keyword[if] identifier[self] . identifier[metadata] keyword[and] literal[string] keyword[in] identifier[self] . identifier[metadata] :
identifier[self] . identifier[cell_type] = identifier[self] . identifier[metadata] . identifier[pop] ( literal[string] )
keyword[else] :
identifier[self] . identifier[cell_type] = literal[string]
identifier[parser] = identifier[StringParser] ( identifier[self] . identifier[language] keyword[or] identifier[self] . identifier[default_language] )
keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[lines] ):
keyword[if] identifier[self] . identifier[metadata] keyword[is] keyword[not] keyword[None] keyword[and] identifier[i] == literal[int] :
keyword[continue]
keyword[if] identifier[parser] . identifier[is_quoted] ():
identifier[parser] . identifier[read_line] ( identifier[line] )
keyword[continue]
identifier[parser] . identifier[read_line] ( identifier[line] )
keyword[if] identifier[self] . identifier[start_code_re] . identifier[match] ( identifier[line] ) keyword[or] (
identifier[self] . identifier[simple_start_code_re] keyword[and] identifier[self] . identifier[simple_start_code_re] . identifier[match] ( identifier[line] ) keyword[and]
( identifier[self] . identifier[cell_marker_start] keyword[or] identifier[i] == literal[int] keyword[or] identifier[_BLANK_LINE] . identifier[match] ( identifier[lines] [ identifier[i] - literal[int] ]))):
keyword[if] identifier[self] . identifier[explicit_end_marker_required] :
identifier[self] . identifier[metadata] = keyword[None]
identifier[self] . identifier[language] = keyword[None]
keyword[if] identifier[i] > literal[int] keyword[and] identifier[_BLANK_LINE] . identifier[match] ( identifier[lines] [ identifier[i] - literal[int] ]):
keyword[if] identifier[i] > literal[int] keyword[and] identifier[_BLANK_LINE] . identifier[match] ( identifier[lines] [ identifier[i] - literal[int] ]):
keyword[return] identifier[i] - literal[int] , identifier[i] , keyword[False]
keyword[return] identifier[i] - literal[int] , identifier[i] , keyword[False]
keyword[return] identifier[i] , identifier[i] , keyword[False]
keyword[if] keyword[not] identifier[self] . identifier[ignore_end_marker] keyword[and] identifier[self] . identifier[end_code_re] :
keyword[if] identifier[self] . identifier[end_code_re] . identifier[match] ( identifier[line] ):
keyword[return] identifier[i] , identifier[i] + literal[int] , keyword[True]
keyword[elif] identifier[_BLANK_LINE] . identifier[match] ( identifier[line] ):
keyword[if] keyword[not] identifier[next_code_is_indented] ( identifier[lines] [ identifier[i] :]):
keyword[if] identifier[i] > literal[int] :
keyword[return] identifier[i] , identifier[i] + literal[int] , keyword[False]
keyword[if] identifier[len] ( identifier[lines] )> literal[int] keyword[and] keyword[not] identifier[_BLANK_LINE] . identifier[match] ( identifier[lines] [ literal[int] ]):
keyword[return] literal[int] , literal[int] , keyword[False]
keyword[return] literal[int] , literal[int] , keyword[False]
keyword[return] identifier[len] ( identifier[lines] ), identifier[len] ( identifier[lines] ), keyword[False] | def find_region_end(self, lines):
"""Find the end of the region started with start and end markers"""
if self.metadata and 'cell_type' in self.metadata:
self.cell_type = self.metadata.pop('cell_type') # depends on [control=['if'], data=[]]
else:
self.cell_type = 'code'
parser = StringParser(self.language or self.default_language)
for (i, line) in enumerate(lines):
# skip cell header
if self.metadata is not None and i == 0:
continue # depends on [control=['if'], data=[]]
if parser.is_quoted():
parser.read_line(line)
continue # depends on [control=['if'], data=[]]
parser.read_line(line)
# New code region
# Simple code pattern in LightScripts must be preceded with a blank line
if self.start_code_re.match(line) or (self.simple_start_code_re and self.simple_start_code_re.match(line) and (self.cell_marker_start or i == 0 or _BLANK_LINE.match(lines[i - 1]))):
if self.explicit_end_marker_required:
# Metadata here was conditioned on finding an explicit end marker
# before the next start marker. So we dismiss it.
self.metadata = None
self.language = None # depends on [control=['if'], data=[]]
if i > 0 and _BLANK_LINE.match(lines[i - 1]):
if i > 1 and _BLANK_LINE.match(lines[i - 2]):
return (i - 2, i, False) # depends on [control=['if'], data=[]]
return (i - 1, i, False) # depends on [control=['if'], data=[]]
return (i, i, False) # depends on [control=['if'], data=[]]
if not self.ignore_end_marker and self.end_code_re:
if self.end_code_re.match(line):
return (i, i + 1, True) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif _BLANK_LINE.match(line):
if not next_code_is_indented(lines[i:]):
if i > 0:
return (i, i + 1, False) # depends on [control=['if'], data=['i']]
if len(lines) > 1 and (not _BLANK_LINE.match(lines[1])):
return (1, 1, False) # depends on [control=['if'], data=[]]
return (1, 2, False) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return (len(lines), len(lines), False) |
def get_object_data(obj, fields, safe):
    """
    Given an object and a list of fields, recursively build an object for serialization.
    Returns a dictionary; fields that raise while being read are logged and skipped.
    """
    serialized = dict()
    for field_name in fields:
        try:
            value = getattr(obj, str(field_name))
            is_model_list = isinstance(value, list) and all(
                [isinstance(entry, models.Model) for entry in value])
            if is_model_list:
                # A list of model instances: serialize each one recursively.
                serialized[field_name] = []
                for entry in value:
                    serialized[field_name].append(
                        get_object_data(entry, get_fields(entry), safe))
            elif isinstance(value, models.Model):
                # A single related model: recurse with its own field list.
                serialized[field_name] = get_object_data(
                    value, get_fields(value), safe)
            else:
                # Plain value; escape strings unless the caller opted out.
                if not safe and isinstance(value, basestring):
                    value = cgi.escape(value)
                serialized[field_name] = value
        except Exception as err:
            logger.info("Unable to get attribute.")
            logger.error(err)
            continue
    return serialized
constant[
Given an object and a list of fields, recursively build an object for serialization.
Returns a dictionary.
]
variable[temp_dict] assign[=] call[name[dict], parameter[]]
for taget[name[field]] in starred[name[fields]] begin[:]
<ast.Try object at 0x7da1b2346d70>
return[name[temp_dict]] | keyword[def] identifier[get_object_data] ( identifier[obj] , identifier[fields] , identifier[safe] ):
literal[string]
identifier[temp_dict] = identifier[dict] ()
keyword[for] identifier[field] keyword[in] identifier[fields] :
keyword[try] :
identifier[attribute] = identifier[getattr] ( identifier[obj] , identifier[str] ( identifier[field] ))
keyword[if] identifier[isinstance] ( identifier[attribute] , identifier[list] ) keyword[and] identifier[all] ([ identifier[isinstance] ( identifier[item] , identifier[models] . identifier[Model] ) keyword[for] identifier[item] keyword[in] identifier[attribute] ]):
identifier[temp_dict] [ identifier[field] ]=[]
keyword[for] identifier[item] keyword[in] identifier[attribute] :
identifier[temp_dict] [ identifier[field] ]. identifier[append] ( identifier[get_object_data] ( identifier[item] , identifier[get_fields] ( identifier[item] ), identifier[safe] ))
keyword[elif] identifier[isinstance] ( identifier[attribute] , identifier[models] . identifier[Model] ):
identifier[attribute_fields] = identifier[get_fields] ( identifier[attribute] )
identifier[object_data] = identifier[get_object_data] ( identifier[attribute] , identifier[attribute_fields] , identifier[safe] )
identifier[temp_dict] [ identifier[field] ]= identifier[object_data]
keyword[else] :
keyword[if] keyword[not] identifier[safe] :
keyword[if] identifier[isinstance] ( identifier[attribute] , identifier[basestring] ):
identifier[attribute] = identifier[cgi] . identifier[escape] ( identifier[attribute] )
identifier[temp_dict] [ identifier[field] ]= identifier[attribute]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[error] ( identifier[e] )
keyword[continue]
keyword[return] identifier[temp_dict] | def get_object_data(obj, fields, safe):
"""
Given an object and a list of fields, recursively build an object for serialization.
Returns a dictionary.
"""
temp_dict = dict()
for field in fields:
try:
attribute = getattr(obj, str(field))
if isinstance(attribute, list) and all([isinstance(item, models.Model) for item in attribute]):
temp_dict[field] = []
for item in attribute:
temp_dict[field].append(get_object_data(item, get_fields(item), safe)) # Recur # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
elif isinstance(attribute, models.Model):
attribute_fields = get_fields(attribute)
object_data = get_object_data(attribute, attribute_fields, safe) # Recur
temp_dict[field] = object_data # depends on [control=['if'], data=[]]
else:
if not safe:
if isinstance(attribute, basestring):
attribute = cgi.escape(attribute) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
temp_dict[field] = attribute # depends on [control=['try'], data=[]]
except Exception as e:
logger.info('Unable to get attribute.')
logger.error(e)
continue # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['field']]
return temp_dict |
def update_binary_stats(self, label, pred):
    """Update the binary-classification confusion counts for one batch.

    Parameters
    ----------
    label : `NDArray`
        The labels of the data.
    pred : `NDArray`
        Predicted values.

    Raises
    ------
    ValueError
        If more than two distinct label values are present.
    """
    # Materialize both arrays on the host and reduce the prediction
    # scores to hard class labels via argmax over the class axis.
    pred = pred.asnumpy()
    label = label.asnumpy().astype('int32')
    pred_label = numpy.argmax(pred, axis=1)

    check_label_shapes(label, pred)
    if len(numpy.unique(label)) > 2:
        raise ValueError("%s currently only supports binary classification."
                         % self.__class__.__name__)

    # 0/1 indicator arrays for the four confusion-matrix cells.
    predicted_positive = (pred_label == 1)
    predicted_negative = 1 - predicted_positive
    actual_positive = (label == 1)
    actual_negative = 1 - actual_positive

    true_pos = (predicted_positive * actual_positive).sum()
    false_pos = (predicted_positive * actual_negative).sum()
    false_neg = (predicted_negative * actual_positive).sum()
    true_neg = (predicted_negative * actual_negative).sum()

    # Each count feeds both the resettable counter and its 'global_' twin.
    for attr, count in (('true_positives', true_pos),
                        ('false_positives', false_pos),
                        ('false_negatives', false_neg),
                        ('true_negatives', true_neg)):
        setattr(self, attr, getattr(self, attr) + count)
        global_attr = 'global_' + attr
        setattr(self, global_attr, getattr(self, global_attr) + count)
constant[
Update various binary classification counts for a single (label, pred)
pair.
Parameters
----------
label : `NDArray`
The labels of the data.
pred : `NDArray`
Predicted values.
]
variable[pred] assign[=] call[name[pred].asnumpy, parameter[]]
variable[label] assign[=] call[call[name[label].asnumpy, parameter[]].astype, parameter[constant[int32]]]
variable[pred_label] assign[=] call[name[numpy].argmax, parameter[name[pred]]]
call[name[check_label_shapes], parameter[name[label], name[pred]]]
if compare[call[name[len], parameter[call[name[numpy].unique, parameter[name[label]]]]] greater[>] constant[2]] begin[:]
<ast.Raise object at 0x7da1b2088160>
variable[pred_true] assign[=] compare[name[pred_label] equal[==] constant[1]]
variable[pred_false] assign[=] binary_operation[constant[1] - name[pred_true]]
variable[label_true] assign[=] compare[name[label] equal[==] constant[1]]
variable[label_false] assign[=] binary_operation[constant[1] - name[label_true]]
variable[true_pos] assign[=] call[binary_operation[name[pred_true] * name[label_true]].sum, parameter[]]
variable[false_pos] assign[=] call[binary_operation[name[pred_true] * name[label_false]].sum, parameter[]]
variable[false_neg] assign[=] call[binary_operation[name[pred_false] * name[label_true]].sum, parameter[]]
variable[true_neg] assign[=] call[binary_operation[name[pred_false] * name[label_false]].sum, parameter[]]
<ast.AugAssign object at 0x7da1b204d6c0>
<ast.AugAssign object at 0x7da1b204df30>
<ast.AugAssign object at 0x7da1b204f940>
<ast.AugAssign object at 0x7da1b204d360>
<ast.AugAssign object at 0x7da1b204e920>
<ast.AugAssign object at 0x7da1b204f190>
<ast.AugAssign object at 0x7da1b204f8b0>
<ast.AugAssign object at 0x7da1b204ff10> | keyword[def] identifier[update_binary_stats] ( identifier[self] , identifier[label] , identifier[pred] ):
literal[string]
identifier[pred] = identifier[pred] . identifier[asnumpy] ()
identifier[label] = identifier[label] . identifier[asnumpy] (). identifier[astype] ( literal[string] )
identifier[pred_label] = identifier[numpy] . identifier[argmax] ( identifier[pred] , identifier[axis] = literal[int] )
identifier[check_label_shapes] ( identifier[label] , identifier[pred] )
keyword[if] identifier[len] ( identifier[numpy] . identifier[unique] ( identifier[label] ))> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
% identifier[self] . identifier[__class__] . identifier[__name__] )
identifier[pred_true] =( identifier[pred_label] == literal[int] )
identifier[pred_false] = literal[int] - identifier[pred_true]
identifier[label_true] =( identifier[label] == literal[int] )
identifier[label_false] = literal[int] - identifier[label_true]
identifier[true_pos] =( identifier[pred_true] * identifier[label_true] ). identifier[sum] ()
identifier[false_pos] =( identifier[pred_true] * identifier[label_false] ). identifier[sum] ()
identifier[false_neg] =( identifier[pred_false] * identifier[label_true] ). identifier[sum] ()
identifier[true_neg] =( identifier[pred_false] * identifier[label_false] ). identifier[sum] ()
identifier[self] . identifier[true_positives] += identifier[true_pos]
identifier[self] . identifier[global_true_positives] += identifier[true_pos]
identifier[self] . identifier[false_positives] += identifier[false_pos]
identifier[self] . identifier[global_false_positives] += identifier[false_pos]
identifier[self] . identifier[false_negatives] += identifier[false_neg]
identifier[self] . identifier[global_false_negatives] += identifier[false_neg]
identifier[self] . identifier[true_negatives] += identifier[true_neg]
identifier[self] . identifier[global_true_negatives] += identifier[true_neg] | def update_binary_stats(self, label, pred):
"""
Update various binary classification counts for a single (label, pred)
pair.
Parameters
----------
label : `NDArray`
The labels of the data.
pred : `NDArray`
Predicted values.
"""
pred = pred.asnumpy()
label = label.asnumpy().astype('int32')
pred_label = numpy.argmax(pred, axis=1)
check_label_shapes(label, pred)
if len(numpy.unique(label)) > 2:
raise ValueError('%s currently only supports binary classification.' % self.__class__.__name__) # depends on [control=['if'], data=[]]
pred_true = pred_label == 1
pred_false = 1 - pred_true
label_true = label == 1
label_false = 1 - label_true
true_pos = (pred_true * label_true).sum()
false_pos = (pred_true * label_false).sum()
false_neg = (pred_false * label_true).sum()
true_neg = (pred_false * label_false).sum()
self.true_positives += true_pos
self.global_true_positives += true_pos
self.false_positives += false_pos
self.global_false_positives += false_pos
self.false_negatives += false_neg
self.global_false_negatives += false_neg
self.true_negatives += true_neg
self.global_true_negatives += true_neg |
def do_wordwrap(environment, s, width=79, break_long_words=True,
                wrapstring=None):
    """
    Return a copy of the string passed to the filter wrapped after
    ``79`` characters.  You can override this default using the first
    parameter.  If you set the second parameter to `false` Jinja will not
    split words apart if they are longer than `width`. By default, the newlines
    will be the default newlines for the environment, but this can be changed
    using the wrapstring keyword argument.

    .. versionadded:: 2.7
       Added support for the `wrapstring` parameter.
    """
    import textwrap

    # An explicit wrapstring wins; otherwise fall back to the newline
    # sequence configured on the environment.
    joiner = wrapstring or environment.newline_sequence
    wrapped_lines = textwrap.wrap(s, width=width, expand_tabs=False,
                                  replace_whitespace=False,
                                  break_long_words=break_long_words)
    return joiner.join(wrapped_lines)
constant[
Return a copy of the string passed to the filter wrapped after
``79`` characters. You can override this default using the first
parameter. If you set the second parameter to `false` Jinja will not
split words apart if they are longer than `width`. By default, the newlines
will be the default newlines for the environment, but this can be changed
using the wrapstring keyword argument.
.. versionadded:: 2.7
Added support for the `wrapstring` parameter.
]
if <ast.UnaryOp object at 0x7da1b1ea2200> begin[:]
variable[wrapstring] assign[=] name[environment].newline_sequence
import module[textwrap]
return[call[name[wrapstring].join, parameter[call[name[textwrap].wrap, parameter[name[s]]]]]] | keyword[def] identifier[do_wordwrap] ( identifier[environment] , identifier[s] , identifier[width] = literal[int] , identifier[break_long_words] = keyword[True] ,
identifier[wrapstring] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[wrapstring] :
identifier[wrapstring] = identifier[environment] . identifier[newline_sequence]
keyword[import] identifier[textwrap]
keyword[return] identifier[wrapstring] . identifier[join] ( identifier[textwrap] . identifier[wrap] ( identifier[s] , identifier[width] = identifier[width] , identifier[expand_tabs] = keyword[False] ,
identifier[replace_whitespace] = keyword[False] ,
identifier[break_long_words] = identifier[break_long_words] )) | def do_wordwrap(environment, s, width=79, break_long_words=True, wrapstring=None):
"""
Return a copy of the string passed to the filter wrapped after
``79`` characters. You can override this default using the first
parameter. If you set the second parameter to `false` Jinja will not
split words apart if they are longer than `width`. By default, the newlines
will be the default newlines for the environment, but this can be changed
using the wrapstring keyword argument.
.. versionadded:: 2.7
Added support for the `wrapstring` parameter.
"""
if not wrapstring:
wrapstring = environment.newline_sequence # depends on [control=['if'], data=[]]
import textwrap
return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False, replace_whitespace=False, break_long_words=break_long_words)) |
def import_data(
        self, resource_group_name, name, files, format=None, custom_headers=None, raw=False, polling=True, **operation_config):
    """Import data into Redis cache.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param name: The name of the Redis cache.
    :type name: str
    :param files: files to import.
    :type files: list[str]
    :param format: File format.
    :type format: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns None or
     ClientRawResponse<None> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Issue the initial (raw) request; the returned poller drives the
    # long-running operation to completion.
    raw_result = self._import_data_initial(
        resource_group_name=resource_group_name,
        name=name,
        files=files,
        format=format,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # The operation produces no body; only wrap the raw pipeline
        # response when the caller asked for it (otherwise return None).
        if raw:
            return ClientRawResponse(None, response)

    # A per-call timeout in operation_config overrides the client default.
    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)

    if polling is True:
        polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False:
        polling_method = NoPolling()
    else:
        # Caller supplied their own polling-strategy object.
        polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
constant[Import data into Redis cache.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param name: The name of the Redis cache.
:type name: str
:param files: files to import.
:type files: list[str]
:param format: File format.
:type format: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
]
variable[raw_result] assign[=] call[name[self]._import_data_initial, parameter[]]
def function[get_long_running_output, parameter[response]]:
if name[raw] begin[:]
variable[client_raw_response] assign[=] call[name[ClientRawResponse], parameter[constant[None], name[response]]]
return[name[client_raw_response]]
variable[lro_delay] assign[=] call[name[operation_config].get, parameter[constant[long_running_operation_timeout], name[self].config.long_running_operation_timeout]]
if compare[name[polling] is constant[True]] begin[:]
variable[polling_method] assign[=] call[name[ARMPolling], parameter[name[lro_delay]]]
return[call[name[LROPoller], parameter[name[self]._client, name[raw_result], name[get_long_running_output], name[polling_method]]]] | keyword[def] identifier[import_data] (
identifier[self] , identifier[resource_group_name] , identifier[name] , identifier[files] , identifier[format] = keyword[None] , identifier[custom_headers] = keyword[None] , identifier[raw] = keyword[False] , identifier[polling] = keyword[True] ,** identifier[operation_config] ):
literal[string]
identifier[raw_result] = identifier[self] . identifier[_import_data_initial] (
identifier[resource_group_name] = identifier[resource_group_name] ,
identifier[name] = identifier[name] ,
identifier[files] = identifier[files] ,
identifier[format] = identifier[format] ,
identifier[custom_headers] = identifier[custom_headers] ,
identifier[raw] = keyword[True] ,
** identifier[operation_config]
)
keyword[def] identifier[get_long_running_output] ( identifier[response] ):
keyword[if] identifier[raw] :
identifier[client_raw_response] = identifier[ClientRawResponse] ( keyword[None] , identifier[response] )
keyword[return] identifier[client_raw_response]
identifier[lro_delay] = identifier[operation_config] . identifier[get] (
literal[string] ,
identifier[self] . identifier[config] . identifier[long_running_operation_timeout] )
keyword[if] identifier[polling] keyword[is] keyword[True] : identifier[polling_method] = identifier[ARMPolling] ( identifier[lro_delay] ,** identifier[operation_config] )
keyword[elif] identifier[polling] keyword[is] keyword[False] : identifier[polling_method] = identifier[NoPolling] ()
keyword[else] : identifier[polling_method] = identifier[polling]
keyword[return] identifier[LROPoller] ( identifier[self] . identifier[_client] , identifier[raw_result] , identifier[get_long_running_output] , identifier[polling_method] ) | def import_data(self, resource_group_name, name, files, format=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Import data into Redis cache.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param name: The name of the Redis cache.
:type name: str
:param files: files to import.
:type files: list[str]
:param format: File format.
:type format: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._import_data_initial(resource_group_name=resource_group_name, name=name, files=files, format=format, custom_headers=custom_headers, raw=True, **operation_config)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response # depends on [control=['if'], data=[]]
lro_delay = operation_config.get('long_running_operation_timeout', self.config.long_running_operation_timeout)
if polling is True:
polling_method = ARMPolling(lro_delay, **operation_config) # depends on [control=['if'], data=[]]
elif polling is False:
polling_method = NoPolling() # depends on [control=['if'], data=[]]
else:
polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) |
def get_file_to_stream(
        self, share_name, directory_name, file_name, stream,
        start_range=None, end_range=None, range_get_content_md5=None,
        progress_callback=None, max_connections=1, max_retries=5,
        retry_wait=1.0, timeout=None):
    '''
    Downloads a file to a stream, with automatic chunking and progress
    notifications. Returns an instance of :class:`File` with properties
    and metadata.
    :param str share_name:
        Name of existing share.
    :param str directory_name:
        The path to the directory.
    :param str file_name:
        Name of existing file.
    :param io.IOBase stream:
        Opened file/stream to write to.
    :param int start_range:
        Start of byte range to use for downloading a section of the file.
        If no end_range is given, all bytes after the start_range will be downloaded.
        The start_range and end_range params are inclusive.
        Ex: start_range=0, end_range=511 will download first 512 bytes of file.
    :param int end_range:
        End of byte range to use for downloading a section of the file.
        If end_range is given, start_range must be provided.
        The start_range and end_range params are inclusive.
        Ex: start_range=0, end_range=511 will download first 512 bytes of file.
    :param bool range_get_content_md5:
        When this header is set to True and specified together
        with the Range header, the service returns the MD5 hash for the
        range, as long as the range is less than or equal to 4 MB in size.
    :param progress_callback:
        Callback for progress with signature function(current, total)
        where current is the number of bytes transfered so far, and total is
        the size of the file if known.
    :type progress_callback: callback function in format of func(current, total)
    :param int max_connections:
        Set to 1 to download the file sequentially.
        Set to 2 or greater if you want to download a file larger than 64MB in chunks.
        If the file size does not exceed 64MB it will be downloaded in one chunk.
    :param int max_retries:
        Number of times to retry download of file chunk if an error occurs.
    :param int retry_wait:
        Sleep time in secs between retries.
    :param int timeout:
        The timeout parameter is expressed in seconds. This method may make
        multiple calls to the Azure service and the timeout will apply to
        each call individually.
    :return: A File with properties and metadata.
    :rtype: :class:`~azure.storage.file.models.File`
    '''
    _validate_not_none('share_name', share_name)
    _validate_not_none('file_name', file_name)
    _validate_not_none('stream', stream)
    # Parallel chunked download writes at arbitrary offsets, so the target
    # stream must be seekable; seekable() is only available on Python 3.
    if sys.version_info >= (3,) and max_connections > 1 and not stream.seekable():
        raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
    # Only get properties if parallelism will actually be used
    # (a range-MD5 request always takes the single-request path below).
    file_size = None
    if max_connections > 1 and range_get_content_md5 is None:
        file = self.get_file_properties(share_name, directory_name,
                                        file_name, timeout=timeout)
        file_size = file.properties.content_length
        # If file size is large, use parallel download
        if file_size >= self.MAX_SINGLE_GET_SIZE:
            _download_file_chunks(
                self,
                share_name,
                directory_name,
                file_name,
                file_size,
                self.MAX_CHUNK_GET_SIZE,
                start_range,
                end_range,
                stream,
                max_connections,
                max_retries,
                retry_wait,
                progress_callback,
                timeout
            )
            # Properties/metadata were fetched above; content stays in the
            # caller's stream rather than on the returned File.
            return file
    # If parallelism is off or the file is small, do a single download
    # NOTE(review): file_size may still be None here (sequential path), so
    # download_size presumably depends on _get_download_size handling None
    # -- confirm against that helper.
    download_size = _get_download_size(start_range, end_range, file_size)
    if progress_callback:
        progress_callback(0, download_size)
    file = self._get_file(
        share_name,
        directory_name,
        file_name,
        start_range=start_range,
        end_range=end_range,
        range_get_content_md5=range_get_content_md5,
        timeout=timeout)
    if file.content is not None:
        stream.write(file.content)
        if progress_callback:
            # Report the actual number of bytes written as both current and
            # total, since the single request returned everything at once.
            download_size = len(file.content)
            progress_callback(download_size, download_size)
    file.content = None # Clear file content since output has been written to user stream
return file | def function[get_file_to_stream, parameter[self, share_name, directory_name, file_name, stream, start_range, end_range, range_get_content_md5, progress_callback, max_connections, max_retries, retry_wait, timeout]]:
constant[
Downloads a file to a stream, with automatic chunking and progress
notifications. Returns an instance of :class:`File` with properties
and metadata.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param io.IOBase stream:
Opened file/stream to write to.
:param int start_range:
Start of byte range to use for downloading a section of the file.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for downloading a section of the file.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool range_get_content_md5:
When this header is set to True and specified together
with the Range header, the service returns the MD5 hash for the
range, as long as the range is less than or equal to 4 MB in size.
:param progress_callback:
Callback for progress with signature function(current, total)
where current is the number of bytes transfered so far, and total is
the size of the file if known.
:type progress_callback: callback function in format of func(current, total)
:param int max_connections:
Set to 1 to download the file sequentially.
Set to 2 or greater if you want to download a file larger than 64MB in chunks.
If the file size does not exceed 64MB it will be downloaded in one chunk.
:param int max_retries:
Number of times to retry download of file chunk if an error occurs.
:param int retry_wait:
Sleep time in secs between retries.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A File with properties and metadata.
:rtype: :class:`~azure.storage.file.models.File`
]
call[name[_validate_not_none], parameter[constant[share_name], name[share_name]]]
call[name[_validate_not_none], parameter[constant[file_name], name[file_name]]]
call[name[_validate_not_none], parameter[constant[stream], name[stream]]]
if <ast.BoolOp object at 0x7da20c9906d0> begin[:]
<ast.Raise object at 0x7da20c992c80>
variable[file_size] assign[=] constant[None]
if <ast.BoolOp object at 0x7da20c9912d0> begin[:]
variable[file] assign[=] call[name[self].get_file_properties, parameter[name[share_name], name[directory_name], name[file_name]]]
variable[file_size] assign[=] name[file].properties.content_length
if compare[name[file_size] greater_or_equal[>=] name[self].MAX_SINGLE_GET_SIZE] begin[:]
call[name[_download_file_chunks], parameter[name[self], name[share_name], name[directory_name], name[file_name], name[file_size], name[self].MAX_CHUNK_GET_SIZE, name[start_range], name[end_range], name[stream], name[max_connections], name[max_retries], name[retry_wait], name[progress_callback], name[timeout]]]
return[name[file]]
variable[download_size] assign[=] call[name[_get_download_size], parameter[name[start_range], name[end_range], name[file_size]]]
if name[progress_callback] begin[:]
call[name[progress_callback], parameter[constant[0], name[download_size]]]
variable[file] assign[=] call[name[self]._get_file, parameter[name[share_name], name[directory_name], name[file_name]]]
if compare[name[file].content is_not constant[None]] begin[:]
call[name[stream].write, parameter[name[file].content]]
if name[progress_callback] begin[:]
variable[download_size] assign[=] call[name[len], parameter[name[file].content]]
call[name[progress_callback], parameter[name[download_size], name[download_size]]]
name[file].content assign[=] constant[None]
return[name[file]] | keyword[def] identifier[get_file_to_stream] (
identifier[self] , identifier[share_name] , identifier[directory_name] , identifier[file_name] , identifier[stream] ,
identifier[start_range] = keyword[None] , identifier[end_range] = keyword[None] , identifier[range_get_content_md5] = keyword[None] ,
identifier[progress_callback] = keyword[None] , identifier[max_connections] = literal[int] , identifier[max_retries] = literal[int] ,
identifier[retry_wait] = literal[int] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[_validate_not_none] ( literal[string] , identifier[share_name] )
identifier[_validate_not_none] ( literal[string] , identifier[file_name] )
identifier[_validate_not_none] ( literal[string] , identifier[stream] )
keyword[if] identifier[sys] . identifier[version_info] >=( literal[int] ,) keyword[and] identifier[max_connections] > literal[int] keyword[and] keyword[not] identifier[stream] . identifier[seekable] ():
keyword[raise] identifier[ValueError] ( identifier[_ERROR_PARALLEL_NOT_SEEKABLE] )
identifier[file_size] = keyword[None]
keyword[if] identifier[max_connections] > literal[int] keyword[and] identifier[range_get_content_md5] keyword[is] keyword[None] :
identifier[file] = identifier[self] . identifier[get_file_properties] ( identifier[share_name] , identifier[directory_name] ,
identifier[file_name] , identifier[timeout] = identifier[timeout] )
identifier[file_size] = identifier[file] . identifier[properties] . identifier[content_length]
keyword[if] identifier[file_size] >= identifier[self] . identifier[MAX_SINGLE_GET_SIZE] :
identifier[_download_file_chunks] (
identifier[self] ,
identifier[share_name] ,
identifier[directory_name] ,
identifier[file_name] ,
identifier[file_size] ,
identifier[self] . identifier[MAX_CHUNK_GET_SIZE] ,
identifier[start_range] ,
identifier[end_range] ,
identifier[stream] ,
identifier[max_connections] ,
identifier[max_retries] ,
identifier[retry_wait] ,
identifier[progress_callback] ,
identifier[timeout]
)
keyword[return] identifier[file]
identifier[download_size] = identifier[_get_download_size] ( identifier[start_range] , identifier[end_range] , identifier[file_size] )
keyword[if] identifier[progress_callback] :
identifier[progress_callback] ( literal[int] , identifier[download_size] )
identifier[file] = identifier[self] . identifier[_get_file] (
identifier[share_name] ,
identifier[directory_name] ,
identifier[file_name] ,
identifier[start_range] = identifier[start_range] ,
identifier[end_range] = identifier[end_range] ,
identifier[range_get_content_md5] = identifier[range_get_content_md5] ,
identifier[timeout] = identifier[timeout] )
keyword[if] identifier[file] . identifier[content] keyword[is] keyword[not] keyword[None] :
identifier[stream] . identifier[write] ( identifier[file] . identifier[content] )
keyword[if] identifier[progress_callback] :
identifier[download_size] = identifier[len] ( identifier[file] . identifier[content] )
identifier[progress_callback] ( identifier[download_size] , identifier[download_size] )
identifier[file] . identifier[content] = keyword[None]
keyword[return] identifier[file] | def get_file_to_stream(self, share_name, directory_name, file_name, stream, start_range=None, end_range=None, range_get_content_md5=None, progress_callback=None, max_connections=1, max_retries=5, retry_wait=1.0, timeout=None):
"""
Downloads a file to a stream, with automatic chunking and progress
notifications. Returns an instance of :class:`File` with properties
and metadata.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param io.IOBase stream:
Opened file/stream to write to.
:param int start_range:
Start of byte range to use for downloading a section of the file.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for downloading a section of the file.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool range_get_content_md5:
When this header is set to True and specified together
with the Range header, the service returns the MD5 hash for the
range, as long as the range is less than or equal to 4 MB in size.
:param progress_callback:
Callback for progress with signature function(current, total)
where current is the number of bytes transfered so far, and total is
the size of the file if known.
:type progress_callback: callback function in format of func(current, total)
:param int max_connections:
Set to 1 to download the file sequentially.
Set to 2 or greater if you want to download a file larger than 64MB in chunks.
If the file size does not exceed 64MB it will be downloaded in one chunk.
:param int max_retries:
Number of times to retry download of file chunk if an error occurs.
:param int retry_wait:
Sleep time in secs between retries.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A File with properties and metadata.
:rtype: :class:`~azure.storage.file.models.File`
"""
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('stream', stream)
if sys.version_info >= (3,) and max_connections > 1 and (not stream.seekable()):
raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) # depends on [control=['if'], data=[]]
# Only get properties if parallelism will actually be used
file_size = None
if max_connections > 1 and range_get_content_md5 is None:
file = self.get_file_properties(share_name, directory_name, file_name, timeout=timeout)
file_size = file.properties.content_length
# If file size is large, use parallel download
if file_size >= self.MAX_SINGLE_GET_SIZE:
_download_file_chunks(self, share_name, directory_name, file_name, file_size, self.MAX_CHUNK_GET_SIZE, start_range, end_range, stream, max_connections, max_retries, retry_wait, progress_callback, timeout)
return file # depends on [control=['if'], data=['file_size']] # depends on [control=['if'], data=[]]
# If parallelism is off or the file is small, do a single download
download_size = _get_download_size(start_range, end_range, file_size)
if progress_callback:
progress_callback(0, download_size) # depends on [control=['if'], data=[]]
file = self._get_file(share_name, directory_name, file_name, start_range=start_range, end_range=end_range, range_get_content_md5=range_get_content_md5, timeout=timeout)
if file.content is not None:
stream.write(file.content) # depends on [control=['if'], data=[]]
if progress_callback:
download_size = len(file.content)
progress_callback(download_size, download_size) # depends on [control=['if'], data=[]]
file.content = None # Clear file content since output has been written to user stream
return file |
def change_column(self, name, options):
    """
    Changes column details.

    :param name: The column to change.
    :type name: str

    :param options: The new options.
    :type options: str

    :rtype: Table
    """
    # Look the column up by name, swap in the new options, and return the
    # table itself so calls can be chained.
    self.get_column(name).set_options(options)
    return self
constant[
Changes column details.
:param name: The column to change.
:type name: str
:param options: The new options.
:type options: str
:rtype: Table
]
variable[column] assign[=] call[name[self].get_column, parameter[name[name]]]
call[name[column].set_options, parameter[name[options]]]
return[name[self]] | keyword[def] identifier[change_column] ( identifier[self] , identifier[name] , identifier[options] ):
literal[string]
identifier[column] = identifier[self] . identifier[get_column] ( identifier[name] )
identifier[column] . identifier[set_options] ( identifier[options] )
keyword[return] identifier[self] | def change_column(self, name, options):
"""
Changes column details.
:param name: The column to change.
:type name: str
:param options: The new options.
:type options: str
:rtype: Table
"""
column = self.get_column(name)
column.set_options(options)
return self |
def two_values_melt(
    df,
    first_value_vars: List[str],
    second_value_vars: List[str],
    var_name: str,
    value_name: str
):
    """
    Transforms one or multiple columns into rows.
    Unlike the plain melt function, two value columns are returned
    (e.g. an evolution column and a price column).
    ---
    ### Parameters
    *mandatory :*
    - `first_value_vars` (*list of str*): columns feeding the first returned value column
    - `second_value_vars` (*list of str*): columns feeding the second returned value column
    - `var_name` (*str*): name of the column containing values in first_value_vars
    - `value_name` (*str*): suffix of the two value columns (suffix_first / suffix_second)
    ---
    ### Example
    **Input**
    | Region | avg | total | evo_avg | evo_total |
    |:---------:|:--------:|:-----------:|:--------:|:-----------:|
    | A | 50| 100 | 1 | 4 |
    | B | 40 | 250 | 2 | 5 |
    ```cson
    two_values_melt:
    first_value_vars: ["avg", "total"]
    second_value_vars: ["evo_avg", "evo_total"]
    var_name: "type"
    value_name: "value"
    ```
    **Output**
    | Region | type | value_first | value_second |
    |:---------:|:--------:|:------------:|:-------------:|
    | A | avg| 50 | 1 |
    | A | total| 100 | 4 |
    | B | avg| 40 | 2 |
    | B | total| 250 | 5 |
    """
    first_col = value_name + '_first'
    second_col = value_name + '_second'

    def _melt(value_vars, out_name):
        # Melt the given value columns, keeping every other column as id.
        keep = [c for c in list(df) if c not in value_vars]
        return pd.melt(df, id_vars=keep, value_vars=value_vars,
                       var_name=var_name, value_name=out_name)

    first = _melt(first_value_vars, first_col).drop(second_value_vars, axis=1)
    second = _melt(second_value_vars, second_col)
    # Relabel the second-value variable names with their first-value
    # counterparts so both frames share the same labels in var_name,
    # which makes the merge line up.
    second = second.replace(dict(zip(second_value_vars, first_value_vars)))
    second = second.drop(first_value_vars, axis=1)
    join_cols = [c for c in list(first) if c != first_col]
    return pd.merge(first, second, on=join_cols, how='outer')
constant[
Transforms one or multiple columns into rows.
Unlike melt function, two value columns can be returned by
the function (e.g. an evolution column and a price column)
---
### Parameters
*mandatory :*
- `first_value_vars` (*list of str*): name of the columns corresponding to the first returned value column
- `second_value_vars` (*list of str*): name of the columns corresponding to the second returned value column
- `var_name` (*str*): name of the column containing values in first_value_vars
- `value_name` (*str*): suffix of the two value columns (suffix_first / suffix_second)
---
### Example
**Input**
| Region | avg | total | evo_avg | evo_total |
|:---------:|:--------:|:-----------:|:--------:|:-----------:|
| A | 50| 100 | 1 | 4 |
| B | 40 | 250 | 2 | 5 |
```cson
two_values_melt:
first_value_vars: ["avg", "total"]
second_value_vars: ["evo_avg", "evo_total"]
var_name: "type"
value_name: "value"
```
**Output**
| Region | type | value_first | value_second |
|:---------:|:--------:|:------------:|:-------------:|
| A | avg| 50 | 1 |
| A | total| 100 | 4 |
| B | avg| 40 | 2 |
| B | avg| 250 | 5 |
]
variable[value_name_first] assign[=] binary_operation[name[value_name] + constant[_first]]
variable[value_name_second] assign[=] binary_operation[name[value_name] + constant[_second]]
variable[melt_first_value] assign[=] call[name[pd].melt, parameter[name[df]]]
call[name[melt_first_value].drop, parameter[name[second_value_vars]]]
variable[melt_second_value] assign[=] call[name[pd].melt, parameter[name[df]]]
variable[normalize_types] assign[=] <ast.DictComp object at 0x7da1b0550b50>
call[name[melt_second_value].replace, parameter[name[normalize_types]]]
call[name[melt_second_value].drop, parameter[name[first_value_vars]]]
variable[on_cols] assign[=] call[name[list], parameter[name[melt_first_value]]]
call[name[on_cols].remove, parameter[name[value_name_first]]]
return[call[name[pd].merge, parameter[name[melt_first_value], name[melt_second_value]]]] | keyword[def] identifier[two_values_melt] (
identifier[df] ,
identifier[first_value_vars] : identifier[List] [ identifier[str] ],
identifier[second_value_vars] : identifier[List] [ identifier[str] ],
identifier[var_name] : identifier[str] ,
identifier[value_name] : identifier[str]
):
literal[string]
identifier[value_name_first] = identifier[value_name] + literal[string]
identifier[value_name_second] = identifier[value_name] + literal[string]
identifier[melt_first_value] = identifier[pd] . identifier[melt] ( identifier[df] ,
identifier[id_vars] =[ identifier[col] keyword[for] identifier[col] keyword[in] identifier[list] ( identifier[df] ) keyword[if]
identifier[col] keyword[not] keyword[in] identifier[first_value_vars] ],
identifier[value_vars] = identifier[first_value_vars] ,
identifier[var_name] = identifier[var_name] ,
identifier[value_name] = identifier[value_name_first] )
identifier[melt_first_value] . identifier[drop] ( identifier[second_value_vars] , identifier[axis] = literal[int] , identifier[inplace] = keyword[True] )
identifier[melt_second_value] = identifier[pd] . identifier[melt] ( identifier[df] ,
identifier[id_vars] =[ identifier[col] keyword[for] identifier[col] keyword[in] identifier[list] ( identifier[df] ) keyword[if]
identifier[col] keyword[not] keyword[in] identifier[second_value_vars] ],
identifier[value_vars] = identifier[second_value_vars] ,
identifier[var_name] = identifier[var_name] ,
identifier[value_name] = identifier[value_name_second] )
identifier[normalize_types] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[zip] ( identifier[second_value_vars] , identifier[first_value_vars] )}
identifier[melt_second_value] . identifier[replace] ( identifier[normalize_types] , identifier[inplace] = keyword[True] )
identifier[melt_second_value] . identifier[drop] ( identifier[first_value_vars] , identifier[axis] = literal[int] , identifier[inplace] = keyword[True] )
identifier[on_cols] = identifier[list] ( identifier[melt_first_value] )
identifier[on_cols] . identifier[remove] ( identifier[value_name_first] )
keyword[return] identifier[pd] . identifier[merge] ( identifier[melt_first_value] , identifier[melt_second_value] , identifier[on] = identifier[on_cols] , identifier[how] = literal[string] ) | def two_values_melt(df, first_value_vars: List[str], second_value_vars: List[str], var_name: str, value_name: str):
"""
Transforms one or multiple columns into rows.
Unlike melt function, two value columns can be returned by
the function (e.g. an evolution column and a price column)
---
### Parameters
*mandatory :*
- `first_value_vars` (*list of str*): name of the columns corresponding to the first returned value column
- `second_value_vars` (*list of str*): name of the columns corresponding to the second returned value column
- `var_name` (*str*): name of the column containing values in first_value_vars
- `value_name` (*str*): suffix of the two value columns (suffix_first / suffix_second)
---
### Example
**Input**
| Region | avg | total | evo_avg | evo_total |
|:---------:|:--------:|:-----------:|:--------:|:-----------:|
| A | 50| 100 | 1 | 4 |
| B | 40 | 250 | 2 | 5 |
```cson
two_values_melt:
first_value_vars: ["avg", "total"]
second_value_vars: ["evo_avg", "evo_total"]
var_name: "type"
value_name: "value"
```
**Output**
| Region | type | value_first | value_second |
|:---------:|:--------:|:------------:|:-------------:|
| A | avg| 50 | 1 |
| A | total| 100 | 4 |
| B | avg| 40 | 2 |
| B | avg| 250 | 5 |
"""
value_name_first = value_name + '_first'
value_name_second = value_name + '_second'
# Melt on the first value columns
melt_first_value = pd.melt(df, id_vars=[col for col in list(df) if col not in first_value_vars], value_vars=first_value_vars, var_name=var_name, value_name=value_name_first)
melt_first_value.drop(second_value_vars, axis=1, inplace=True)
# Melt on the second value columns
melt_second_value = pd.melt(df, id_vars=[col for col in list(df) if col not in second_value_vars], value_vars=second_value_vars, var_name=var_name, value_name=value_name_second)
# Since there are two value columns, there is no need to keep the
# second_value_vars names. And it will make things easier for the merge.
normalize_types = {k: v for (k, v) in zip(second_value_vars, first_value_vars)}
melt_second_value.replace(normalize_types, inplace=True)
melt_second_value.drop(first_value_vars, axis=1, inplace=True)
on_cols = list(melt_first_value)
on_cols.remove(value_name_first)
return pd.merge(melt_first_value, melt_second_value, on=on_cols, how='outer') |
def flip(self):
    """
    Return a copy of this node with its own color and both children's
    colors inverted (red <-> black).
    """
    new_left = self.left._replace(red=not self.left.red)
    new_right = self.right._replace(red=not self.right.red)
    return self._replace(left=new_left, right=new_right, red=not self.red)
constant[
Flip colors of a node and its children.
]
variable[left] assign[=] call[name[self].left._replace, parameter[]]
variable[right] assign[=] call[name[self].right._replace, parameter[]]
variable[top] assign[=] call[name[self]._replace, parameter[]]
return[name[top]] | keyword[def] identifier[flip] ( identifier[self] ):
literal[string]
identifier[left] = identifier[self] . identifier[left] . identifier[_replace] ( identifier[red] = keyword[not] identifier[self] . identifier[left] . identifier[red] )
identifier[right] = identifier[self] . identifier[right] . identifier[_replace] ( identifier[red] = keyword[not] identifier[self] . identifier[right] . identifier[red] )
identifier[top] = identifier[self] . identifier[_replace] ( identifier[left] = identifier[left] , identifier[right] = identifier[right] , identifier[red] = keyword[not] identifier[self] . identifier[red] )
keyword[return] identifier[top] | def flip(self):
"""
Flip colors of a node and its children.
"""
left = self.left._replace(red=not self.left.red)
right = self.right._replace(red=not self.right.red)
top = self._replace(left=left, right=right, red=not self.red)
return top |
def marginal_loglike(self, x):
    r"""Marginal log-likelihood.

    Returns ``L_marg(x) = \int L(x,y|z') L(y) dy``

    Uses the cached '~fermipy.castro.Interpolator' object when it is
    available; otherwise falls back to the full computation, which also
    builds and caches the spline.
    """
    interp = self._marg_interp
    if interp is not None:
        return interp(np.array(x, ndmin=1))
    # No cached spline yet: compute values directly (caches the spline).
    return self._marginal_loglike(x)
constant[Marginal log-likelihood.
Returns ``L_marg(x) = \int L(x,y|z') L(y) dy``
This will used the cached '~fermipy.castro.Interpolator'
object if possible, and construct it if needed.
]
if compare[name[self]._marg_interp is constant[None]] begin[:]
return[call[name[self]._marginal_loglike, parameter[name[x]]]]
variable[x] assign[=] call[name[np].array, parameter[name[x]]]
return[call[name[self]._marg_interp, parameter[name[x]]]] | keyword[def] identifier[marginal_loglike] ( identifier[self] , identifier[x] ):
literal[string]
keyword[if] identifier[self] . identifier[_marg_interp] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[_marginal_loglike] ( identifier[x] )
identifier[x] = identifier[np] . identifier[array] ( identifier[x] , identifier[ndmin] = literal[int] )
keyword[return] identifier[self] . identifier[_marg_interp] ( identifier[x] ) | def marginal_loglike(self, x):
"""Marginal log-likelihood.
Returns ``L_marg(x) = \\int L(x,y|z') L(y) dy``
This will used the cached '~fermipy.castro.Interpolator'
object if possible, and construct it if needed.
"""
if self._marg_interp is None:
# This calculates values and caches the spline
return self._marginal_loglike(x) # depends on [control=['if'], data=[]]
x = np.array(x, ndmin=1)
return self._marg_interp(x) |
def energies(self, samples_like, dtype=float):
    """Determine the energies of the given samples.
    Args:
        samples_like (samples_like):
            A collection of raw samples. `samples_like` is an extension of NumPy's array_like
            structure. See :func:`.as_samples`.
        dtype (:class:`numpy.dtype`):
            The data type of the returned energies. Defaults to the
            builtin ``float`` (``np.float`` was an alias for it and has
            been removed from NumPy 1.24+).
    Returns:
        :obj:`numpy.ndarray`: The energies.
    """
    samples, labels = as_samples(samples_like)
    # Skip the variable reordering when the labels are already 0..n-1
    # in order, so the common case avoids the extra mapping work.
    if all(v == idx for idx, v in enumerate(labels)):
        ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(dtype=dtype)
    else:
        ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(variable_order=labels, dtype=dtype)
    # Vectorized over all samples: linear term + quadratic term + offset.
    energies = samples.dot(ldata) + (samples[:, irow]*samples[:, icol]).dot(qdata) + offset
    return np.asarray(energies, dtype=dtype)
constant[Determine the energies of the given samples.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of NumPy's array_like
structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`):
The data type of the returned energies.
Returns:
:obj:`numpy.ndarray`: The energies.
]
<ast.Tuple object at 0x7da1b07f6dd0> assign[=] call[name[as_samples], parameter[name[samples_like]]]
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b07f6b00>]] begin[:]
<ast.Tuple object at 0x7da1b07f6f80> assign[=] call[name[self].to_numpy_vectors, parameter[]]
variable[energies] assign[=] binary_operation[binary_operation[call[name[samples].dot, parameter[name[ldata]]] + call[binary_operation[call[name[samples]][tuple[[<ast.Slice object at 0x7da1b07f7220>, <ast.Name object at 0x7da1b07f7100>]]] * call[name[samples]][tuple[[<ast.Slice object at 0x7da1b07f5210>, <ast.Name object at 0x7da1b07f6050>]]]].dot, parameter[name[qdata]]]] + name[offset]]
return[call[name[np].asarray, parameter[name[energies]]]] | keyword[def] identifier[energies] ( identifier[self] , identifier[samples_like] , identifier[dtype] = identifier[np] . identifier[float] ):
literal[string]
identifier[samples] , identifier[labels] = identifier[as_samples] ( identifier[samples_like] )
keyword[if] identifier[all] ( identifier[v] == identifier[idx] keyword[for] identifier[idx] , identifier[v] keyword[in] identifier[enumerate] ( identifier[labels] )):
identifier[ldata] ,( identifier[irow] , identifier[icol] , identifier[qdata] ), identifier[offset] = identifier[self] . identifier[to_numpy_vectors] ( identifier[dtype] = identifier[dtype] )
keyword[else] :
identifier[ldata] ,( identifier[irow] , identifier[icol] , identifier[qdata] ), identifier[offset] = identifier[self] . identifier[to_numpy_vectors] ( identifier[variable_order] = identifier[labels] , identifier[dtype] = identifier[dtype] )
identifier[energies] = identifier[samples] . identifier[dot] ( identifier[ldata] )+( identifier[samples] [:, identifier[irow] ]* identifier[samples] [:, identifier[icol] ]). identifier[dot] ( identifier[qdata] )+ identifier[offset]
keyword[return] identifier[np] . identifier[asarray] ( identifier[energies] , identifier[dtype] = identifier[dtype] ) | def energies(self, samples_like, dtype=np.float):
"""Determine the energies of the given samples.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of NumPy's array_like
structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`):
The data type of the returned energies.
Returns:
:obj:`numpy.ndarray`: The energies.
"""
(samples, labels) = as_samples(samples_like)
if all((v == idx for (idx, v) in enumerate(labels))):
(ldata, (irow, icol, qdata), offset) = self.to_numpy_vectors(dtype=dtype) # depends on [control=['if'], data=[]]
else:
(ldata, (irow, icol, qdata), offset) = self.to_numpy_vectors(variable_order=labels, dtype=dtype)
energies = samples.dot(ldata) + (samples[:, irow] * samples[:, icol]).dot(qdata) + offset
return np.asarray(energies, dtype=dtype) |
def lt(computation: BaseComputation) -> None:
    """
    Lesser Comparison

    Pops two unsigned 256-bit values from the stack and pushes 1 if the
    first is strictly less than the second, 0 otherwise.
    """
    left, right = computation.stack_pop(num_items=2, type_hint=constants.UINT256)
    result = 1 if left < right else 0
    computation.stack_push(result)
constant[
Lesser Comparison
]
<ast.Tuple object at 0x7da1b16451e0> assign[=] call[name[computation].stack_pop, parameter[]]
if compare[name[left] less[<] name[right]] begin[:]
variable[result] assign[=] constant[1]
call[name[computation].stack_push, parameter[name[result]]] | keyword[def] identifier[lt] ( identifier[computation] : identifier[BaseComputation] )-> keyword[None] :
literal[string]
identifier[left] , identifier[right] = identifier[computation] . identifier[stack_pop] ( identifier[num_items] = literal[int] , identifier[type_hint] = identifier[constants] . identifier[UINT256] )
keyword[if] identifier[left] < identifier[right] :
identifier[result] = literal[int]
keyword[else] :
identifier[result] = literal[int]
identifier[computation] . identifier[stack_push] ( identifier[result] ) | def lt(computation: BaseComputation) -> None:
"""
Lesser Comparison
"""
(left, right) = computation.stack_pop(num_items=2, type_hint=constants.UINT256)
if left < right:
result = 1 # depends on [control=['if'], data=[]]
else:
result = 0
computation.stack_push(result) |
def cross(self, vec):
    """Cross product with another vector.

    :param vec: the right-hand operand; must be the same vector type.
    :raises TypeError: if ``vec`` is not an instance of this class.
    :returns: a new ``Vector3`` whose z component holds the scalar
        cross-product value.
    """
    if not isinstance(vec, self.__class__):
        raise TypeError('Cross product operand must be a vector')
    # np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
    # supported equivalent for extracting the Python scalar.
    # NOTE(review): storing the result in z assumes the operands behave
    # as 2-D vectors (scalar cross product) -- confirm against Vector3.
    return Vector3(0, 0, np.cross(self, vec).item())
constant[Cross product with another vector]
if <ast.UnaryOp object at 0x7da18eb55300> begin[:]
<ast.Raise object at 0x7da1b1c7f2b0>
return[call[name[Vector3], parameter[constant[0], constant[0], call[name[np].asscalar, parameter[call[name[np].cross, parameter[name[self], name[vec]]]]]]]] | keyword[def] identifier[cross] ( identifier[self] , identifier[vec] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[vec] , identifier[self] . identifier[__class__] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[return] identifier[Vector3] ( literal[int] , literal[int] , identifier[np] . identifier[asscalar] ( identifier[np] . identifier[cross] ( identifier[self] , identifier[vec] ))) | def cross(self, vec):
"""Cross product with another vector"""
if not isinstance(vec, self.__class__):
raise TypeError('Cross product operand must be a vector') # depends on [control=['if'], data=[]]
return Vector3(0, 0, np.asscalar(np.cross(self, vec))) |
def _construct_instance(cls, values):
    """
    method used to construct instances from query results
    this is where polymorphic deserialization occurs

    :param values: one result row, keyed by db field name
    :returns: a model instance (possibly of a subclass of ``cls`` when
        the model is polymorphic), already flagged as persisted
    :raises PolymorphicModelException: if the discriminator value is
        missing, matches no known submodel, or resolves to a class that
        is not a subclass of ``cls``
    """
    # we're going to take the values, which is from the DB as a dict
    # and translate that into our local fields
    # the db_map is a db_field -> model field map
    if cls._db_map:
        values = dict((cls._db_map.get(k, k), v) for k, v in values.items())
    if cls._is_polymorphic:
        disc_key = values.get(cls._discriminator_column_name)
        if disc_key is None:
            raise PolymorphicModelException('discriminator value was not found in values')
        # Resolve submodels from the polymorphic base, not from cls,
        # so lookups work even when querying through a subclass.
        poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base
        klass = poly_base._get_model_by_discriminator_value(disc_key)
        if klass is None:
            # The submodel registry may not be populated yet; discover
            # submodels once and retry before rejecting the value.
            poly_base._discover_polymorphic_submodels()
            klass = poly_base._get_model_by_discriminator_value(disc_key)
            if klass is None:
                raise PolymorphicModelException(
                    'unrecognized discriminator column {0} for class {1}'.format(disc_key, poly_base.__name__)
                )
        if not issubclass(klass, cls):
            raise PolymorphicModelException(
                '{0} is not a subclass of {1}'.format(klass.__name__, cls.__name__)
            )
        # Keep only the columns the resolved subclass actually defines.
        values = dict((k, v) for k, v in values.items() if k in klass._columns.keys())
    else:
        klass = cls
    instance = klass(**values)
    # Mark the freshly built instance as already persisted in the DB.
    instance._set_persisted(force=True)
    return instance
constant[
method used to construct instances from query results
this is where polymorphic deserialization occurs
]
if name[cls]._db_map begin[:]
variable[values] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da18bc70a60>]]
if name[cls]._is_polymorphic begin[:]
variable[disc_key] assign[=] call[name[values].get, parameter[name[cls]._discriminator_column_name]]
if compare[name[disc_key] is constant[None]] begin[:]
<ast.Raise object at 0x7da18bc72e30>
variable[poly_base] assign[=] <ast.IfExp object at 0x7da18bc71ba0>
variable[klass] assign[=] call[name[poly_base]._get_model_by_discriminator_value, parameter[name[disc_key]]]
if compare[name[klass] is constant[None]] begin[:]
call[name[poly_base]._discover_polymorphic_submodels, parameter[]]
variable[klass] assign[=] call[name[poly_base]._get_model_by_discriminator_value, parameter[name[disc_key]]]
if compare[name[klass] is constant[None]] begin[:]
<ast.Raise object at 0x7da18bc700d0>
if <ast.UnaryOp object at 0x7da1b22bb8b0> begin[:]
<ast.Raise object at 0x7da1b22b89d0>
variable[values] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b22ba830>]]
variable[instance] assign[=] call[name[klass], parameter[]]
call[name[instance]._set_persisted, parameter[]]
return[name[instance]] | keyword[def] identifier[_construct_instance] ( identifier[cls] , identifier[values] ):
literal[string]
keyword[if] identifier[cls] . identifier[_db_map] :
identifier[values] = identifier[dict] (( identifier[cls] . identifier[_db_map] . identifier[get] ( identifier[k] , identifier[k] ), identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[values] . identifier[items] ())
keyword[if] identifier[cls] . identifier[_is_polymorphic] :
identifier[disc_key] = identifier[values] . identifier[get] ( identifier[cls] . identifier[_discriminator_column_name] )
keyword[if] identifier[disc_key] keyword[is] keyword[None] :
keyword[raise] identifier[PolymorphicModelException] ( literal[string] )
identifier[poly_base] = identifier[cls] keyword[if] identifier[cls] . identifier[_is_polymorphic_base] keyword[else] identifier[cls] . identifier[_polymorphic_base]
identifier[klass] = identifier[poly_base] . identifier[_get_model_by_discriminator_value] ( identifier[disc_key] )
keyword[if] identifier[klass] keyword[is] keyword[None] :
identifier[poly_base] . identifier[_discover_polymorphic_submodels] ()
identifier[klass] = identifier[poly_base] . identifier[_get_model_by_discriminator_value] ( identifier[disc_key] )
keyword[if] identifier[klass] keyword[is] keyword[None] :
keyword[raise] identifier[PolymorphicModelException] (
literal[string] . identifier[format] ( identifier[disc_key] , identifier[poly_base] . identifier[__name__] )
)
keyword[if] keyword[not] identifier[issubclass] ( identifier[klass] , identifier[cls] ):
keyword[raise] identifier[PolymorphicModelException] (
literal[string] . identifier[format] ( identifier[klass] . identifier[__name__] , identifier[cls] . identifier[__name__] )
)
identifier[values] = identifier[dict] (( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[values] . identifier[items] () keyword[if] identifier[k] keyword[in] identifier[klass] . identifier[_columns] . identifier[keys] ())
keyword[else] :
identifier[klass] = identifier[cls]
identifier[instance] = identifier[klass] (** identifier[values] )
identifier[instance] . identifier[_set_persisted] ( identifier[force] = keyword[True] )
keyword[return] identifier[instance] | def _construct_instance(cls, values):
"""
method used to construct instances from query results
this is where polymorphic deserialization occurs
"""
# we're going to take the values, which is from the DB as a dict
# and translate that into our local fields
# the db_map is a db_field -> model field map
if cls._db_map:
values = dict(((cls._db_map.get(k, k), v) for (k, v) in values.items())) # depends on [control=['if'], data=[]]
if cls._is_polymorphic:
disc_key = values.get(cls._discriminator_column_name)
if disc_key is None:
raise PolymorphicModelException('discriminator value was not found in values') # depends on [control=['if'], data=[]]
poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base
klass = poly_base._get_model_by_discriminator_value(disc_key)
if klass is None:
poly_base._discover_polymorphic_submodels()
klass = poly_base._get_model_by_discriminator_value(disc_key)
if klass is None:
raise PolymorphicModelException('unrecognized discriminator column {0} for class {1}'.format(disc_key, poly_base.__name__)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['klass']]
if not issubclass(klass, cls):
raise PolymorphicModelException('{0} is not a subclass of {1}'.format(klass.__name__, cls.__name__)) # depends on [control=['if'], data=[]]
values = dict(((k, v) for (k, v) in values.items() if k in klass._columns.keys())) # depends on [control=['if'], data=[]]
else:
klass = cls
instance = klass(**values)
instance._set_persisted(force=True)
return instance |
def get_fws(value):
    """FWS = 1*WSP

    Not the strict RFC definition: fws marks tokens where folding may
    occur, but unfolding has already happened by the time we parse, so
    CRLF never needs to be handled here.
    """
    remainder = value.lstrip()
    ws_count = len(value) - len(remainder)
    fws = WhiteSpaceTerminal(value[:ws_count], 'fws')
    return fws, remainder
constant[FWS = 1*WSP
This isn't the RFC definition. We're using fws to represent tokens where
folding can be done, but when we are parsing the *un*folding has already
been done so we don't need to watch out for CRLF.
]
variable[newvalue] assign[=] call[name[value].lstrip, parameter[]]
variable[fws] assign[=] call[name[WhiteSpaceTerminal], parameter[call[name[value]][<ast.Slice object at 0x7da18f58ee30>], constant[fws]]]
return[tuple[[<ast.Name object at 0x7da18f58cf40>, <ast.Name object at 0x7da18f58da80>]]] | keyword[def] identifier[get_fws] ( identifier[value] ):
literal[string]
identifier[newvalue] = identifier[value] . identifier[lstrip] ()
identifier[fws] = identifier[WhiteSpaceTerminal] ( identifier[value] [: identifier[len] ( identifier[value] )- identifier[len] ( identifier[newvalue] )], literal[string] )
keyword[return] identifier[fws] , identifier[newvalue] | def get_fws(value):
"""FWS = 1*WSP
This isn't the RFC definition. We're using fws to represent tokens where
folding can be done, but when we are parsing the *un*folding has already
been done so we don't need to watch out for CRLF.
"""
newvalue = value.lstrip()
fws = WhiteSpaceTerminal(value[:len(value) - len(newvalue)], 'fws')
return (fws, newvalue) |
def html_email(email, title=None):
    """Render an HTML ``mailto:`` link for *email*, using *title* (or
    the address itself) as the visible link text.

    >>> html_email('username@example.com')
    '<a href="mailto:username@example.com">username@example.com</a>'
    """
    label = title if title else email
    return '<a href="mailto:{email}">{title}</a>'.format(email=email, title=label)
constant[
>>> html_email('username@example.com')
'<a href="mailto:username@example.com">username@example.com</a>'
]
if <ast.UnaryOp object at 0x7da18c4cdcc0> begin[:]
variable[title] assign[=] name[email]
return[call[constant[<a href="mailto:{email}">{title}</a>].format, parameter[]]] | keyword[def] identifier[html_email] ( identifier[email] , identifier[title] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[title] :
identifier[title] = identifier[email]
keyword[return] literal[string] . identifier[format] ( identifier[email] = identifier[email] , identifier[title] = identifier[title] ) | def html_email(email, title=None):
"""
>>> html_email('username@example.com')
'<a href="mailto:username@example.com">username@example.com</a>'
"""
if not title:
title = email # depends on [control=['if'], data=[]]
return '<a href="mailto:{email}">{title}</a>'.format(email=email, title=title) |
def get_backreferences(context, relationship=None, as_brains=None):
    """Look up the objects that reference ``context`` via a UIDReferenceField.

    :param context: The object which is the target of references.
    :param relationship: The relationship name of the UIDReferenceField.
    :param as_brains: Return catalog brains instead of raw UIDs; only
        allowed together with ``relationship``.

    With ``relationship`` given, returns the list of referencing UIDs
    for that relationship (or catalog brains when ``as_brains`` is
    truthy). Without it, the entire backreference storage dict is
    returned *by reference*, so callers may edit it in place.
    """
    target = context.aq_base
    storage = get_storage(target)
    if not relationship:
        assert not as_brains, "You cannot use as_brains with no relationship"
        return storage
    uids = list(storage.get(relationship, []))
    if not uids:
        return []
    if not as_brains:
        return uids
    # All backrefs of one relationship live in the same catalog, so the
    # first UID is enough to pick it.
    catalog = _get_catalog_for_uid(uids[0])
    return catalog(UID=uids)
constant[Return all objects which use a UIDReferenceField to reference context.
:param context: The object which is the target of references.
:param relationship: The relationship name of the UIDReferenceField.
:param as_brains: Requests that this function returns only catalog brains.
as_brains can only be used if a relationship has been specified.
This function can be called with or without specifying a relationship.
- If a relationship is provided, the return value will be a list of items
which reference the context using the provided relationship.
If relationship is provided, then you can request that the backrefs
should be returned as catalog brains. If you do not specify as_brains,
the raw list of UIDs will be returned.
- If the relationship is not provided, then the entire set of
backreferences to the context object is returned (by reference) as a
dictionary. This value can then be modified in-place, to edit the stored
backreferences.
]
variable[instance] assign[=] name[context].aq_base
variable[raw_backrefs] assign[=] call[name[get_storage], parameter[name[instance]]]
if <ast.UnaryOp object at 0x7da18eb54550> begin[:]
assert[<ast.UnaryOp object at 0x7da18eb55f00>]
return[name[raw_backrefs]]
variable[backrefs] assign[=] call[name[list], parameter[call[name[raw_backrefs].get, parameter[name[relationship], list[[]]]]]]
if <ast.UnaryOp object at 0x7da18eb57af0> begin[:]
return[list[[]]]
if <ast.UnaryOp object at 0x7da18eb56710> begin[:]
return[name[backrefs]]
variable[cat] assign[=] call[name[_get_catalog_for_uid], parameter[call[name[backrefs]][constant[0]]]]
return[call[name[cat], parameter[]]] | keyword[def] identifier[get_backreferences] ( identifier[context] , identifier[relationship] = keyword[None] , identifier[as_brains] = keyword[None] ):
literal[string]
identifier[instance] = identifier[context] . identifier[aq_base]
identifier[raw_backrefs] = identifier[get_storage] ( identifier[instance] )
keyword[if] keyword[not] identifier[relationship] :
keyword[assert] keyword[not] identifier[as_brains] , literal[string]
keyword[return] identifier[raw_backrefs]
identifier[backrefs] = identifier[list] ( identifier[raw_backrefs] . identifier[get] ( identifier[relationship] ,[]))
keyword[if] keyword[not] identifier[backrefs] :
keyword[return] []
keyword[if] keyword[not] identifier[as_brains] :
keyword[return] identifier[backrefs]
identifier[cat] = identifier[_get_catalog_for_uid] ( identifier[backrefs] [ literal[int] ])
keyword[return] identifier[cat] ( identifier[UID] = identifier[backrefs] ) | def get_backreferences(context, relationship=None, as_brains=None):
"""Return all objects which use a UIDReferenceField to reference context.
:param context: The object which is the target of references.
:param relationship: The relationship name of the UIDReferenceField.
:param as_brains: Requests that this function returns only catalog brains.
as_brains can only be used if a relationship has been specified.
This function can be called with or without specifying a relationship.
- If a relationship is provided, the return value will be a list of items
which reference the context using the provided relationship.
If relationship is provided, then you can request that the backrefs
should be returned as catalog brains. If you do not specify as_brains,
the raw list of UIDs will be returned.
- If the relationship is not provided, then the entire set of
backreferences to the context object is returned (by reference) as a
dictionary. This value can then be modified in-place, to edit the stored
backreferences.
"""
instance = context.aq_base
raw_backrefs = get_storage(instance)
if not relationship:
assert not as_brains, 'You cannot use as_brains with no relationship'
return raw_backrefs # depends on [control=['if'], data=[]]
backrefs = list(raw_backrefs.get(relationship, []))
if not backrefs:
return [] # depends on [control=['if'], data=[]]
if not as_brains:
return backrefs # depends on [control=['if'], data=[]]
cat = _get_catalog_for_uid(backrefs[0])
return cat(UID=backrefs) |
def draw_selection(self, surf):
    """Draw the in-progress selection rectangle onto *surf*.

    Only draws while a drag-selection is active (``self._select_start`` is
    set) and the mouse is currently over a SCREEN-type surface of the same
    surface type the drag started on.
    """
    select_start = self._select_start  # Cache to avoid a race condition.
    if select_start:
        mouse_pos = self.get_mouse_pos()
        # Draw only when the cursor is on a SCREEN surface matching the
        # surface type the selection began on.
        if (mouse_pos and mouse_pos.surf.surf_type & SurfType.SCREEN and
            mouse_pos.surf.surf_type == select_start.surf.surf_type):
            # Rectangle spans from the drag origin to the current cursor,
            # both in world coordinates.
            rect = point.Rect(select_start.world_pos, mouse_pos.world_pos)
            surf.draw_rect(colors.green, rect, 1)
constant[Draw the selection rectange.]
variable[select_start] assign[=] name[self]._select_start
if name[select_start] begin[:]
variable[mouse_pos] assign[=] call[name[self].get_mouse_pos, parameter[]]
if <ast.BoolOp object at 0x7da20c6e74c0> begin[:]
variable[rect] assign[=] call[name[point].Rect, parameter[name[select_start].world_pos, name[mouse_pos].world_pos]]
call[name[surf].draw_rect, parameter[name[colors].green, name[rect], constant[1]]] | keyword[def] identifier[draw_selection] ( identifier[self] , identifier[surf] ):
literal[string]
identifier[select_start] = identifier[self] . identifier[_select_start]
keyword[if] identifier[select_start] :
identifier[mouse_pos] = identifier[self] . identifier[get_mouse_pos] ()
keyword[if] ( identifier[mouse_pos] keyword[and] identifier[mouse_pos] . identifier[surf] . identifier[surf_type] & identifier[SurfType] . identifier[SCREEN] keyword[and]
identifier[mouse_pos] . identifier[surf] . identifier[surf_type] == identifier[select_start] . identifier[surf] . identifier[surf_type] ):
identifier[rect] = identifier[point] . identifier[Rect] ( identifier[select_start] . identifier[world_pos] , identifier[mouse_pos] . identifier[world_pos] )
identifier[surf] . identifier[draw_rect] ( identifier[colors] . identifier[green] , identifier[rect] , literal[int] ) | def draw_selection(self, surf):
"""Draw the selection rectange."""
select_start = self._select_start # Cache to avoid a race condition.
if select_start:
mouse_pos = self.get_mouse_pos()
if mouse_pos and mouse_pos.surf.surf_type & SurfType.SCREEN and (mouse_pos.surf.surf_type == select_start.surf.surf_type):
rect = point.Rect(select_start.world_pos, mouse_pos.world_pos)
surf.draw_rect(colors.green, rect, 1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def set_render_manager(self, agent: BaseAgent):
    """
    Sets the render manager for the agent.

    Obtains a rendering manager for this bot's index and team from the
    game interface's renderer and installs it on the agent.

    :param agent: An instance of an agent that should receive the renderer.
    """
    # The manager is requested with (index, team) — presumably scoping
    # rendering to this particular bot; confirm against renderer docs.
    rendering_manager = self.game_interface.renderer.get_rendering_manager(self.index, self.team)
    agent._set_renderer(rendering_manager)
constant[
Sets the render manager for the agent.
:param agent: An instance of an agent.
]
variable[rendering_manager] assign[=] call[name[self].game_interface.renderer.get_rendering_manager, parameter[name[self].index, name[self].team]]
call[name[agent]._set_renderer, parameter[name[rendering_manager]]] | keyword[def] identifier[set_render_manager] ( identifier[self] , identifier[agent] : identifier[BaseAgent] ):
literal[string]
identifier[rendering_manager] = identifier[self] . identifier[game_interface] . identifier[renderer] . identifier[get_rendering_manager] ( identifier[self] . identifier[index] , identifier[self] . identifier[team] )
identifier[agent] . identifier[_set_renderer] ( identifier[rendering_manager] ) | def set_render_manager(self, agent: BaseAgent):
"""
Sets the render manager for the agent.
:param agent: An instance of an agent.
"""
rendering_manager = self.game_interface.renderer.get_rendering_manager(self.index, self.team)
agent._set_renderer(rendering_manager) |
def create_hds_stream(self, localStreamNames, targetFolder, **kwargs):
    """
    Create an HDS (HTTP Dynamic Streaming) stream out of an existing
    H.264/AAC stream. HDS is used to stream standard MP4 media over
    regular HTTP connections.
    :param localStreamNames: The stream(s) that will be used as the input.
        This is a comma-delimited list of active stream names (local stream
        names).
    :type localStreamNames: str
    :param targetFolder: The folder where all the manifest (*.f4m) and
        fragment (f4v*) files will be stored. This folder must be
        accessible by the HDS clients. It is usually in the web-root of
        the server.
    :type targetFolder: str
    :param bandwidths: The corresponding bandwidths for each stream listed
        in localStreamNames. Again, this can be a comma-delimited list.
    :type bandwidths: int
    :param chunkBaseName: The base name used to generate the fragments.
    :type chunkBaseName: str
    :param chunkLength: The length (in seconds) of fragments to be made.
        Minimum value is 1 (second).
    :type chunkLength: int
    :param chunkOnIDR: If true, chunking is performed ONLY on IDR.
        Otherwise, chunking is performed whenever chunk length is
        achieved.
    :type chunkOnIDR: int
    :param groupName: The name assigned to the HDS stream or group. If the
        localStreamNames parameter contains only one entry and groupName
        is not specified, groupName will have the value of the input
        stream name.
    :type groupName: str
    :param keepAlive: If true, the EMS will attempt to reconnect to the
        stream source if the connection is severed.
    :type keepAlive: int
    :param manifestName: The manifest file name.
    :type manifestName: str
    :param overwriteDestination: If true, it will allow overwrite of
        destination files.
    :type overwriteDestination: int
    :param playlistType: Either appending or rolling.
    :type playlistType: str
    :param playlistLength: The number of fragments before the server
        starts to overwrite the older fragments. Used only when
        playlistType is "rolling". Ignored otherwise.
    :type playlistLength: int
    :param staleRetentionCount: The number of old files kept besides the
        ones listed in the current version of the playlist. Only
        applicable for rolling playlists.
    :type staleRetentionCount: int
    :param createMasterPlaylist: If true, a master playlist will be
        created.
    :type createMasterPlaylist: int
    :param cleanupDestination: If true, all manifest and fragment files in
        the target folder will be removed before HDS creation is started.
    :type cleanupDestination: int
    :returns: The result of the ``createhdsstream`` API call, as produced
        by ``self.protocol.execute``.
    :link: http://docs.evostream.com/ems_api_definition/createhdsstream
    """
    # All optional settings in **kwargs are forwarded verbatim to the API.
    return self.protocol.execute('createhdsstream',
                                 localStreamNames=localStreamNames,
                                 targetFolder=targetFolder, **kwargs)
constant[
Create an HDS (HTTP Dynamic Streaming) stream out of an existing
H.264/AAC stream. HDS is used to stream standard MP4 media over
regular HTTP connections.
:param localStreamNames: The stream(s) that will be used as the input.
This is a comma-delimited list of active stream names (local stream
names).
:type localStreamNames: str
:param targetFolder: The folder where all the manifest (*.f4m) and
fragment (f4v*) files will be stored. This folder must be
accessible by the HDS clients. It is usually in the web-root of
the server.
:type targetFolder: str
:param bandwidths: The corresponding bandwidths for each stream listed
in localStreamNames. Again, this can be a comma-delimited list.
:type bandwidths: int
:param chunkBaseName: The base name used to generate the fragments.
:type chunkBaseName: str
:param chunkLength: The length (in seconds) of fragments to be made.
Minimum value is 1 (second).
:type chunkLength: int
:param chunkOnIDR: If true, chunking is performed ONLY on IDR.
Otherwise, chunking is performed whenever chunk length is
achieved.
:type chunkOnIDR: int
:param groupName: The name assigned to the HDS stream or group. If the
localStreamNames parameter contains only one entry and groupName
is not specified, groupName will have the value of the input
stream name.
:type groupName: str
:param keepAlive: If true, the EMS will attempt to reconnect to the
stream source if the connection is severed.
:type keepAlive: int
:param manifestName: The manifest file name.
:type manifestName: str
:param overwriteDestination: If true, it will allow overwrite of
destination files.
:type overwriteDestination: int
:param playlistType: Either appending or rolling.
:type playlistType: str
:param playlistLength: The number of fragments before the server
starts to overwrite the older fragments. Used only when
playlistType is "rolling". Ignored otherwise.
:type playlistLength: int
:param staleRetentionCount: The number of old files kept besides the
ones listed in the current version of the playlist. Only
applicable for rolling playlists.
:type staleRetentionCount: int
:param createMasterPlaylist: If true, a master playlist will be
created.
:type createMasterPlaylist: int
:param cleanupDestination: If true, all manifest and fragment files in
the target folder will be removed before HDS creation is started.
:type cleanupDestination: int
:link: http://docs.evostream.com/ems_api_definition/createhdsstream
]
return[call[name[self].protocol.execute, parameter[constant[createhdsstream]]]] | keyword[def] identifier[create_hds_stream] ( identifier[self] , identifier[localStreamNames] , identifier[targetFolder] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[protocol] . identifier[execute] ( literal[string] ,
identifier[localStreamNames] = identifier[localStreamNames] ,
identifier[targetFolder] = identifier[targetFolder] ,** identifier[kwargs] ) | def create_hds_stream(self, localStreamNames, targetFolder, **kwargs):
"""
Create an HDS (HTTP Dynamic Streaming) stream out of an existing
H.264/AAC stream. HDS is used to stream standard MP4 media over
regular HTTP connections.
:param localStreamNames: The stream(s) that will be used as the input.
This is a comma-delimited list of active stream names (local stream
names).
:type localStreamNames: str
:param targetFolder: The folder where all the manifest (*.f4m) and
fragment (f4v*) files will be stored. This folder must be
accessible by the HDS clients. It is usually in the web-root of
the server.
:type targetFolder: str
:param bandwidths: The corresponding bandwidths for each stream listed
in localStreamNames. Again, this can be a comma-delimited list.
:type bandwidths: int
:param chunkBaseName: The base name used to generate the fragments.
:type chunkBaseName: str
:param chunkLength: The length (in seconds) of fragments to be made.
Minimum value is 1 (second).
:type chunkLength: int
:param chunkOnIDR: If true, chunking is performed ONLY on IDR.
Otherwise, chunking is performed whenever chunk length is
achieved.
:type chunkOnIDR: int
:param groupName: The name assigned to the HDS stream or group. If the
localStreamNames parameter contains only one entry and groupName
is not specified, groupName will have the value of the input
stream name.
:type groupName: str
:param keepAlive: If true, the EMS will attempt to reconnect to the
stream source if the connection is severed.
:type keepAlive: int
:param manifestName: The manifest file name.
:type manifestName: str
:param overwriteDestination: If true, it will allow overwrite of
destination files.
:type overwriteDestination: int
:param playlistType: Either appending or rolling.
:type playlistType: str
:param playlistLength: The number of fragments before the server
starts to overwrite the older fragments. Used only when
playlistType is "rolling". Ignored otherwise.
:type playlistLength: int
:param staleRetentionCount: The number of old files kept besides the
ones listed in the current version of the playlist. Only
applicable for rolling playlists.
:type staleRetentionCount: int
:param createMasterPlaylist: If true, a master playlist will be
created.
:type createMasterPlaylist: int
:param cleanupDestination: If true, all manifest and fragment files in
the target folder will be removed before HDS creation is started.
:type cleanupDestination: int
:link: http://docs.evostream.com/ems_api_definition/createhdsstream
"""
return self.protocol.execute('createhdsstream', localStreamNames=localStreamNames, targetFolder=targetFolder, **kwargs) |
def get_current_user(self, **params):
    """Fetch the authenticated user's profile.

    See https://developers.coinbase.com/api/v2#show-current-user

    :param params: Optional query parameters forwarded to the GET request.
    """
    return self._make_api_object(self._get('v2', 'user', params=params), CurrentUser)
constant[https://developers.coinbase.com/api/v2#show-current-user]
variable[response] assign[=] call[name[self]._get, parameter[constant[v2], constant[user]]]
return[call[name[self]._make_api_object, parameter[name[response], name[CurrentUser]]]] | keyword[def] identifier[get_current_user] ( identifier[self] ,** identifier[params] ):
literal[string]
identifier[response] = identifier[self] . identifier[_get] ( literal[string] , literal[string] , identifier[params] = identifier[params] )
keyword[return] identifier[self] . identifier[_make_api_object] ( identifier[response] , identifier[CurrentUser] ) | def get_current_user(self, **params):
"""https://developers.coinbase.com/api/v2#show-current-user"""
response = self._get('v2', 'user', params=params)
return self._make_api_object(response, CurrentUser) |
def add_bgp_error_metadata(code, sub_code, def_desc='unknown'):
    """Class decorator attaching BGP error metadata to an exception class.

    Registers the decorated class (when it subclasses ``BGPSException``)
    under the given (code, sub_code) pair and stamps ``CODE``,
    ``SUB_CODE`` and ``DEF_DESC`` onto the class.

    :param code: BGP error code for the decorated exception.
    :param sub_code: BGP error sub-code for the decorated exception.
    :param def_desc: Default human-readable description.
    :raises ValueError: if (code, sub_code) is already registered.
    """
    # Reject duplicate (code, sub_code) registrations up front, at
    # decoration time.
    if _EXCEPTION_REGISTRY.get((code, sub_code)) is not None:
        raise ValueError('BGPSException with code %d and sub-code %d '
                         'already defined.' % (code, sub_code))

    def _apply(subclass):
        """Record the subclass in the registry and stamp its constants."""
        if issubclass(subclass, BGPSException):
            _EXCEPTION_REGISTRY[(code, sub_code)] = subclass
        subclass.CODE = code
        subclass.SUB_CODE = sub_code
        subclass.DEF_DESC = def_desc
        return subclass

    return _apply
constant[Decorator for all exceptions that want to set exception class meta-data.
]
if compare[call[name[_EXCEPTION_REGISTRY].get, parameter[tuple[[<ast.Name object at 0x7da1b1a37520>, <ast.Name object at 0x7da1b1a359c0>]]]] is_not constant[None]] begin[:]
<ast.Raise object at 0x7da1b1a36050>
def function[decorator, parameter[subclass]]:
constant[Sets class constants for exception code and sub-code.
If given class is sub-class of BGPSException we sets class constants.
]
if call[name[issubclass], parameter[name[subclass], name[BGPSException]]] begin[:]
call[name[_EXCEPTION_REGISTRY]][tuple[[<ast.Name object at 0x7da1b1a37d90>, <ast.Name object at 0x7da1b1a37c10>]]] assign[=] name[subclass]
name[subclass].CODE assign[=] name[code]
name[subclass].SUB_CODE assign[=] name[sub_code]
name[subclass].DEF_DESC assign[=] name[def_desc]
return[name[subclass]]
return[name[decorator]] | keyword[def] identifier[add_bgp_error_metadata] ( identifier[code] , identifier[sub_code] , identifier[def_desc] = literal[string] ):
literal[string]
keyword[if] identifier[_EXCEPTION_REGISTRY] . identifier[get] (( identifier[code] , identifier[sub_code] )) keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] %( identifier[code] , identifier[sub_code] ))
keyword[def] identifier[decorator] ( identifier[subclass] ):
literal[string]
keyword[if] identifier[issubclass] ( identifier[subclass] , identifier[BGPSException] ):
identifier[_EXCEPTION_REGISTRY] [( identifier[code] , identifier[sub_code] )]= identifier[subclass]
identifier[subclass] . identifier[CODE] = identifier[code]
identifier[subclass] . identifier[SUB_CODE] = identifier[sub_code]
identifier[subclass] . identifier[DEF_DESC] = identifier[def_desc]
keyword[return] identifier[subclass]
keyword[return] identifier[decorator] | def add_bgp_error_metadata(code, sub_code, def_desc='unknown'):
"""Decorator for all exceptions that want to set exception class meta-data.
"""
# Check registry if we already have an exception with same code/sub-code
if _EXCEPTION_REGISTRY.get((code, sub_code)) is not None:
raise ValueError('BGPSException with code %d and sub-code %d already defined.' % (code, sub_code)) # depends on [control=['if'], data=[]]
def decorator(subclass):
"""Sets class constants for exception code and sub-code.
If given class is sub-class of BGPSException we sets class constants.
"""
if issubclass(subclass, BGPSException):
_EXCEPTION_REGISTRY[code, sub_code] = subclass
subclass.CODE = code
subclass.SUB_CODE = sub_code
subclass.DEF_DESC = def_desc # depends on [control=['if'], data=[]]
return subclass
return decorator |
def _update_pop(self, pop_size):
"""Updates population according to crossover and fitness criteria."""
self.toolbox.generate()
# simple bound checking
for i in range(len(self.population)):
for j in range(len(self.population[i])):
if self.population[i][j] > 1:
self.population[i][j] = 1
if self.population[i][j] < -1:
self.population[i][j] = -1
self.assign_fitnesses(self.population)
self.toolbox.update(self.population)
self._model_count += len(self.population)
return | def function[_update_pop, parameter[self, pop_size]]:
constant[Updates population according to crossover and fitness criteria.]
call[name[self].toolbox.generate, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].population]]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[self].population][name[i]]]]]]] begin[:]
if compare[call[call[name[self].population][name[i]]][name[j]] greater[>] constant[1]] begin[:]
call[call[name[self].population][name[i]]][name[j]] assign[=] constant[1]
if compare[call[call[name[self].population][name[i]]][name[j]] less[<] <ast.UnaryOp object at 0x7da1b282ba30>] begin[:]
call[call[name[self].population][name[i]]][name[j]] assign[=] <ast.UnaryOp object at 0x7da1b282b1f0>
call[name[self].assign_fitnesses, parameter[name[self].population]]
call[name[self].toolbox.update, parameter[name[self].population]]
<ast.AugAssign object at 0x7da1b282b1c0>
return[None] | keyword[def] identifier[_update_pop] ( identifier[self] , identifier[pop_size] ):
literal[string]
identifier[self] . identifier[toolbox] . identifier[generate] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[population] )):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[population] [ identifier[i] ])):
keyword[if] identifier[self] . identifier[population] [ identifier[i] ][ identifier[j] ]> literal[int] :
identifier[self] . identifier[population] [ identifier[i] ][ identifier[j] ]= literal[int]
keyword[if] identifier[self] . identifier[population] [ identifier[i] ][ identifier[j] ]<- literal[int] :
identifier[self] . identifier[population] [ identifier[i] ][ identifier[j] ]=- literal[int]
identifier[self] . identifier[assign_fitnesses] ( identifier[self] . identifier[population] )
identifier[self] . identifier[toolbox] . identifier[update] ( identifier[self] . identifier[population] )
identifier[self] . identifier[_model_count] += identifier[len] ( identifier[self] . identifier[population] )
keyword[return] | def _update_pop(self, pop_size):
"""Updates population according to crossover and fitness criteria."""
self.toolbox.generate()
# simple bound checking
for i in range(len(self.population)):
for j in range(len(self.population[i])):
if self.population[i][j] > 1:
self.population[i][j] = 1 # depends on [control=['if'], data=[]]
if self.population[i][j] < -1:
self.population[i][j] = -1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
self.assign_fitnesses(self.population)
self.toolbox.update(self.population)
self._model_count += len(self.population)
return |
def _requestDetails(self, ip_address=None):
"""Get IP address data by sending request to IPinfo API."""
if ip_address not in self.cache:
url = self.API_URL
if ip_address:
url += '/' + ip_address
response = requests.get(url, headers=self._get_headers(), **self.request_options)
if response.status_code == 429:
raise RequestQuotaExceededError()
response.raise_for_status()
self.cache[ip_address] = response.json()
return self.cache[ip_address] | def function[_requestDetails, parameter[self, ip_address]]:
constant[Get IP address data by sending request to IPinfo API.]
if compare[name[ip_address] <ast.NotIn object at 0x7da2590d7190> name[self].cache] begin[:]
variable[url] assign[=] name[self].API_URL
if name[ip_address] begin[:]
<ast.AugAssign object at 0x7da18dc04a60>
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
if compare[name[response].status_code equal[==] constant[429]] begin[:]
<ast.Raise object at 0x7da18dc04550>
call[name[response].raise_for_status, parameter[]]
call[name[self].cache][name[ip_address]] assign[=] call[name[response].json, parameter[]]
return[call[name[self].cache][name[ip_address]]] | keyword[def] identifier[_requestDetails] ( identifier[self] , identifier[ip_address] = keyword[None] ):
literal[string]
keyword[if] identifier[ip_address] keyword[not] keyword[in] identifier[self] . identifier[cache] :
identifier[url] = identifier[self] . identifier[API_URL]
keyword[if] identifier[ip_address] :
identifier[url] += literal[string] + identifier[ip_address]
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[self] . identifier[_get_headers] (),** identifier[self] . identifier[request_options] )
keyword[if] identifier[response] . identifier[status_code] == literal[int] :
keyword[raise] identifier[RequestQuotaExceededError] ()
identifier[response] . identifier[raise_for_status] ()
identifier[self] . identifier[cache] [ identifier[ip_address] ]= identifier[response] . identifier[json] ()
keyword[return] identifier[self] . identifier[cache] [ identifier[ip_address] ] | def _requestDetails(self, ip_address=None):
"""Get IP address data by sending request to IPinfo API."""
if ip_address not in self.cache:
url = self.API_URL
if ip_address:
url += '/' + ip_address # depends on [control=['if'], data=[]]
response = requests.get(url, headers=self._get_headers(), **self.request_options)
if response.status_code == 429:
raise RequestQuotaExceededError() # depends on [control=['if'], data=[]]
response.raise_for_status()
self.cache[ip_address] = response.json() # depends on [control=['if'], data=['ip_address']]
return self.cache[ip_address] |
def onKeyInCommandEntry(self, event):
    """Handle a key press while the command entry box has focus.

    Return/Enter submits the pending command and hands keyboard focus
    back to the canvas; other keys are ignored.
    """
    if event.char != '\r':
        return
    self.onSendCommand()
    self.canvas.focus_set()
constant[Called when a key is pressed when the command entry box has focus.]
if compare[name[event].char equal[==] constant[
]] begin[:]
call[name[self].onSendCommand, parameter[]]
call[name[self].canvas.focus_set, parameter[]] | keyword[def] identifier[onKeyInCommandEntry] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] identifier[event] . identifier[char] == literal[string] :
identifier[self] . identifier[onSendCommand] ()
identifier[self] . identifier[canvas] . identifier[focus_set] () | def onKeyInCommandEntry(self, event):
"""Called when a key is pressed when the command entry box has focus."""
if event.char == '\r':
self.onSendCommand()
self.canvas.focus_set() # depends on [control=['if'], data=[]] |
def convert_long_to_wide(long_data,
                         ind_vars,
                         alt_specific_vars,
                         subset_specific_vars,
                         obs_id_col,
                         alt_id_col,
                         choice_col,
                         alt_name_dict=None,
                         null_value=np.nan):
    """
    Converts a 'long format' dataframe of cross-sectional discrete choice data
    into a 'wide format' version of the same data.
    Parameters
    ----------
    long_data : pandas dataframe.
        Contains one row for each available alternative for each observation.
        Should have the specified `[obs_id_col, alt_id_col, choice_col]` column
        headings. The dtypes of all columns should be numeric.
    ind_vars : list of strings.
        Each element should be a column heading in `long_data` that denotes a
        variable that varies across observations but not across alternatives.
    alt_specific_vars : list of strings.
        Each element should be a column heading in `long_data` that denotes a
        variable that varies not only across observations but also across all
        alternatives.
    subset_specific_vars : dict.
        Each key should be a string that is a column heading of `long_data`.
        Each value should be a list of alternative ids denoting the subset of
        alternatives which the variable (i.e. the key) over actually varies.
        These variables should vary across individuals and across some
        alternatives.
    obs_id_col : str.
        Denotes the column in `long_data` that contains the observation ID
        values for each row.
    alt_id_col : str.
        Denotes the column in `long_data` that contains the alternative ID
        values for each row.
    choice_col : str.
        Denotes the column in `long_data` that contains a one if the
        alternative pertaining to the given row was the observed outcome for
        the observation pertaining to the given row and a zero otherwise.
    alt_name_dict : dict or None, optional
        If not None, should be a dictionary whose keys are the possible values
        in `long_data[alt_id_col].unique()`. The values should be the name
        that one wants to associate with each alternative id. Default == None.
    null_value : int, float, long, or `np.nan`, optional.
        The passed value will be used to fill cells in the wide format
        dataframe when that cell is unknown for a given individual. This is
        most commonly the case when there is a variable that varies across
        alternatives and one of the alternatives is not available for a given
        indvidual. The `null_value` will be inserted for that individual for
        that variable. Default == `np.nan`.
    Returns
    -------
    final_wide_df : pandas dataframe.
        Will contain one row per observational unit. Will contain an
        observation id column of the same name as `obs_id_col`. Will also
        contain a choice column of the same name as `choice_col`. Will contain
        one availability column per unique, observed alternative in the
        dataset. Will contain one column per variable in `ind_vars`. Will
        contain one column per alternative per variable in `alt_specific_vars`.
        Will contain one column per specified alternative per variable in
        `subset_specific_vars`.
    """
    ##########
    # Check that all columns of long_data are being
    # used in the conversion to wide format
    ##########
    # NOTE(review): ensure_* / check_* below are project validation helpers;
    # they are assumed to raise on invalid input — confirm in their module.
    num_vars_accounted_for = sum([len(x) for x in
                                  [ind_vars, alt_specific_vars,
                                   subset_specific_vars,
                                   [obs_id_col, alt_id_col, choice_col]]])
    ensure_all_columns_are_used(num_vars_accounted_for, long_data)
    ##########
    # Check that all columns one wishes to use are actually in long_data
    ##########
    ensure_columns_are_in_dataframe(ind_vars,
                                    long_data,
                                    col_title="ind_vars",
                                    data_title='long_data')
    ensure_columns_are_in_dataframe(alt_specific_vars,
                                    long_data,
                                    col_title="alt_specific_vars",
                                    data_title='long_data')
    ensure_columns_are_in_dataframe(subset_specific_vars.keys(),
                                    long_data,
                                    col_title="subset_specific_vars",
                                    data_title='long_data')
    identifying_cols = [choice_col, obs_id_col, alt_id_col]
    identifying_col_string = "[choice_col, obs_id_col, alt_id_col]"
    ensure_columns_are_in_dataframe(identifying_cols,
                                    long_data,
                                    col_title=identifying_col_string,
                                    data_title='long_data')
    ##########
    # Make sure that each observation-alternative pair is unique
    ##########
    check_dataframe_for_duplicate_records(obs_id_col, alt_id_col, long_data)
    ##########
    # Make sure each observation chose an alternative that's available.
    ##########
    # Make sure that the number of chosen alternatives equals the number of
    # individuals.
    ensure_num_chosen_alts_equals_num_obs(obs_id_col, choice_col, long_data)
    ##########
    # Check that the alternative ids in the alt_name_dict are actually the
    # alternative ids used in the long_data alt_id column.
    ##########
    if alt_name_dict is not None:
        check_type_and_values_of_alt_name_dict(alt_name_dict,
                                               alt_id_col,
                                               long_data)
    ##########
    # Figure out how many rows/columns should be in the wide format dataframe
    ##########
    # Note that the number of rows in wide format is the number of observations
    num_obs = long_data[obs_id_col].unique().shape[0]
    # Figure out the total number of possible alternatives for the dataset
    num_alts = long_data[alt_id_col].unique().shape[0]
    ############
    # Calculate the needed number of colums
    ############
    # For each observation, there is at least one column-- the observation id,
    num_cols = 1
    # We should have one availability column per alternative in the dataset
    num_cols += num_alts
    # We should also have one column to record the choice of each observation
    num_cols += 1
    # We should also have one column for each individual specific variable
    num_cols += len(ind_vars)
    # We should also have one column for each alternative specific variable,
    # for each alternative
    num_cols += len(alt_specific_vars) * num_alts
    # We should have one column for each subset alternative specific variable
    # for each alternative over which the variable varies
    for col in subset_specific_vars:
        num_cols += len(subset_specific_vars[col])
    ##########
    # Create the columns of the new dataframe
    ##########
    #####
    # Create the individual specific variable columns,
    # along with the observation id column
    #####
    new_df = long_data[[obs_id_col] + ind_vars].drop_duplicates()
    # Reset the index so that the index is not based on long_data
    # NOTE(review): reset_index without drop=True adds an 'index' column to
    # new_df; it is harmless because the final concat below selects columns
    # explicitly, but confirm it is intentional.
    new_df.reset_index(inplace=True)
    #####
    # Create the choice column in the wide data format
    #####
    new_df[choice_col] = long_data.loc[long_data[choice_col] == 1,
                                       alt_id_col].values
    #####
    # Create the availability columns
    #####
    # Get the various long form mapping matrices
    mapping_res = create_long_form_mappings(long_data,
                                            obs_id_col,
                                            alt_id_col)
    row_to_obs = mapping_res["rows_to_obs"]
    row_to_alt = mapping_res["rows_to_alts"]
    # Get the matrix of observations (rows) to available alternatives (columns)
    obs_to_alt = row_to_obs.T.dot(row_to_alt).todense()
    # Determine the unique alternative IDs in the order used in obs_to_alt
    alt_id_values = long_data[alt_id_col].values
    all_alternatives = np.sort(np.unique(alt_id_values))
    # Create the names for the availability columns
    if alt_name_dict is None:
        availability_col_names = ["availability_{}".format(int(x))
                                  for x in all_alternatives]
    else:
        availability_col_names = ["availability_{}".format(alt_name_dict[x])
                                  for x in all_alternatives]
    # Create a dataframe containing the availability columns for this dataset
    availability_df = pd.DataFrame(obs_to_alt,
                                   columns=availability_col_names)
    #####
    # Create the alternative specific and subset
    # alternative specific variable columns
    #####
    # For each alternative specific variable, create a wide format dataframe
    alt_specific_dfs = []
    for col in alt_specific_vars + list(subset_specific_vars.keys()):
        # Get the relevant values from the long format dataframe
        relevant_vals = long_data[col].values[:, None]
        # Create an wide format array of the relevant values
        obs_to_var = row_to_obs.T.dot(row_to_alt.multiply(relevant_vals))
        # Ensure that the wide format array is an ndarray with of dtype float
        if issparse(obs_to_var):
            obs_to_var = obs_to_var.toarray()
        # Ensure that obs_to_var has a float dtype
        obs_to_var = obs_to_var.astype(float)
        # Place a null value in columns where the alternative is not available
        # to a given observation
        if (obs_to_alt == 0).any():
            obs_to_var[np.where(obs_to_alt == 0)] = null_value
        # Create column names for the alternative specific variable columns
        if alt_name_dict is None:
            obs_to_var_names = ["{}_{}".format(col, int(x))
                                for x in all_alternatives]
        else:
            obs_to_var_names = ["{}_{}".format(col, alt_name_dict[x])
                                for x in all_alternatives]
        # Subset obs_to_vars and obs_to_var_names if col is in
        # subset_specific_vars
        if col in subset_specific_vars:
            # Calculate the relevant column indices for
            # the specified subset of alternatives
            relevant_alt_ids = subset_specific_vars[col]
            relevant_col_idx = np.where(np.in1d(all_alternatives,
                                                relevant_alt_ids))[0]
        else:
            relevant_col_idx = None
        # Create a dataframe containing the alternative specific variables
        # or the subset alternative specific variables for the given
        # variable in the long format dataframe
        if relevant_col_idx is None:
            obs_to_var_df = pd.DataFrame(obs_to_var,
                                         columns=obs_to_var_names)
        else:
            obs_to_var_df = pd.DataFrame(obs_to_var[:, relevant_col_idx],
                                         columns=[obs_to_var_names[x] for
                                                  x in relevant_col_idx])
        # Store the current alternative specific variable columns/dataframe
        alt_specific_dfs.append(obs_to_var_df)
    # Combine all of the various alternative specific variable dataframes
    final_alt_specific_df = pd.concat(alt_specific_dfs, axis=1)
    ##########
    # Construct the final wide format dataframe to be returned
    ##########
    final_wide_df = pd.concat([new_df[[obs_id_col]],
                               new_df[[choice_col]],
                               availability_df,
                               new_df[ind_vars],
                               final_alt_specific_df],
                              axis=1)
    # Make sure one has the correct number of rows and columns in
    # the final dataframe
    if final_wide_df.shape != (num_obs, num_cols):
        msg_1 = "There is an error with the dataframe that will be returned"
        msg_2 = "The shape of the dataframe should be {}".format((num_obs,
                                                                  num_cols))
        msg_3 = "Instead, the returned dataframe will have shape: {}"
        total_msg = msg_1 + msg_2 + msg_3.format(final_wide_df.shape)
        # Only warn (not raise): the dataframe is still returned as-is.
        warnings.warn(total_msg)
    # Return the wide format dataframe
    return final_wide_df
constant[
Converts a 'long format' dataframe of cross-sectional discrete choice data
into a 'wide format' version of the same data.
Parameters
----------
long_data : pandas dataframe.
Contains one row for each available alternative for each observation.
Should have the specified `[obs_id_col, alt_id_col, choice_col]` column
headings. The dtypes of all columns should be numeric.
ind_vars : list of strings.
Each element should be a column heading in `long_data` that denotes a
variable that varies across observations but not across alternatives.
alt_specific_vars : list of strings.
Each element should be a column heading in `long_data` that denotes a
variable that varies not only across observations but also across all
alternatives.
subset_specific_vars : dict.
Each key should be a string that is a column heading of `long_data`.
Each value should be a list of alternative ids denoting the subset of
alternatives which the variable (i.e. the key) over actually varies.
These variables should vary across individuals and across some
alternatives.
obs_id_col : str.
Denotes the column in `long_data` that contains the observation ID
values for each row.
alt_id_col : str.
Denotes the column in `long_data` that contains the alternative ID
values for each row.
choice_col : str.
Denotes the column in `long_data` that contains a one if the
alternative pertaining to the given row was the observed outcome for
the observation pertaining to the given row and a zero otherwise.
alt_name_dict : dict or None, optional
If not None, should be a dictionary whose keys are the possible values
in `long_data[alt_id_col].unique()`. The values should be the name
that one wants to associate with each alternative id. Default == None.
null_value : int, float, long, or `np.nan`, optional.
The passed value will be used to fill cells in the wide format
dataframe when that cell is unknown for a given individual. This is
most commonly the case when there is a variable that varies across
alternatives and one of the alternatives is not available for a given
indvidual. The `null_value` will be inserted for that individual for
that variable. Default == `np.nan`.
Returns
-------
final_wide_df : pandas dataframe.
Will contain one row per observational unit. Will contain an
observation id column of the same name as `obs_id_col`. Will also
contain a choice column of the same name as `choice_col`. Will contain
one availability column per unique, observed alternative in the
dataset. Will contain one column per variable in `ind_vars`. Will
contain one column per alternative per variable in `alt_specific_vars`.
Will contain one column per specified alternative per variable in
`subset_specific_vars`.
]
variable[num_vars_accounted_for] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b1305a20>]]
call[name[ensure_all_columns_are_used], parameter[name[num_vars_accounted_for], name[long_data]]]
call[name[ensure_columns_are_in_dataframe], parameter[name[ind_vars], name[long_data]]]
call[name[ensure_columns_are_in_dataframe], parameter[name[alt_specific_vars], name[long_data]]]
call[name[ensure_columns_are_in_dataframe], parameter[call[name[subset_specific_vars].keys, parameter[]], name[long_data]]]
variable[identifying_cols] assign[=] list[[<ast.Name object at 0x7da1b1304f10>, <ast.Name object at 0x7da1b1306950>, <ast.Name object at 0x7da1b1305990>]]
variable[identifying_col_string] assign[=] constant[[choice_col, obs_id_col, alt_id_col]]
call[name[ensure_columns_are_in_dataframe], parameter[name[identifying_cols], name[long_data]]]
call[name[check_dataframe_for_duplicate_records], parameter[name[obs_id_col], name[alt_id_col], name[long_data]]]
call[name[ensure_num_chosen_alts_equals_num_obs], parameter[name[obs_id_col], name[choice_col], name[long_data]]]
if compare[name[alt_name_dict] is_not constant[None]] begin[:]
call[name[check_type_and_values_of_alt_name_dict], parameter[name[alt_name_dict], name[alt_id_col], name[long_data]]]
variable[num_obs] assign[=] call[call[call[name[long_data]][name[obs_id_col]].unique, parameter[]].shape][constant[0]]
variable[num_alts] assign[=] call[call[call[name[long_data]][name[alt_id_col]].unique, parameter[]].shape][constant[0]]
variable[num_cols] assign[=] constant[1]
<ast.AugAssign object at 0x7da1b1307160>
<ast.AugAssign object at 0x7da1b1306ef0>
<ast.AugAssign object at 0x7da1b1305c30>
<ast.AugAssign object at 0x7da1b1304100>
for taget[name[col]] in starred[name[subset_specific_vars]] begin[:]
<ast.AugAssign object at 0x7da1b15d11b0>
variable[new_df] assign[=] call[call[name[long_data]][binary_operation[list[[<ast.Name object at 0x7da1b15d05e0>]] + name[ind_vars]]].drop_duplicates, parameter[]]
call[name[new_df].reset_index, parameter[]]
call[name[new_df]][name[choice_col]] assign[=] call[name[long_data].loc][tuple[[<ast.Compare object at 0x7da1b15d0df0>, <ast.Name object at 0x7da1b15d2f50>]]].values
variable[mapping_res] assign[=] call[name[create_long_form_mappings], parameter[name[long_data], name[obs_id_col], name[alt_id_col]]]
variable[row_to_obs] assign[=] call[name[mapping_res]][constant[rows_to_obs]]
variable[row_to_alt] assign[=] call[name[mapping_res]][constant[rows_to_alts]]
variable[obs_to_alt] assign[=] call[call[name[row_to_obs].T.dot, parameter[name[row_to_alt]]].todense, parameter[]]
variable[alt_id_values] assign[=] call[name[long_data]][name[alt_id_col]].values
variable[all_alternatives] assign[=] call[name[np].sort, parameter[call[name[np].unique, parameter[name[alt_id_values]]]]]
if compare[name[alt_name_dict] is constant[None]] begin[:]
variable[availability_col_names] assign[=] <ast.ListComp object at 0x7da1b15d19c0>
variable[availability_df] assign[=] call[name[pd].DataFrame, parameter[name[obs_to_alt]]]
variable[alt_specific_dfs] assign[=] list[[]]
for taget[name[col]] in starred[binary_operation[name[alt_specific_vars] + call[name[list], parameter[call[name[subset_specific_vars].keys, parameter[]]]]]] begin[:]
variable[relevant_vals] assign[=] call[call[name[long_data]][name[col]].values][tuple[[<ast.Slice object at 0x7da1b15d25f0>, <ast.Constant object at 0x7da1b15d2e60>]]]
variable[obs_to_var] assign[=] call[name[row_to_obs].T.dot, parameter[call[name[row_to_alt].multiply, parameter[name[relevant_vals]]]]]
if call[name[issparse], parameter[name[obs_to_var]]] begin[:]
variable[obs_to_var] assign[=] call[name[obs_to_var].toarray, parameter[]]
variable[obs_to_var] assign[=] call[name[obs_to_var].astype, parameter[name[float]]]
if call[compare[name[obs_to_alt] equal[==] constant[0]].any, parameter[]] begin[:]
call[name[obs_to_var]][call[name[np].where, parameter[compare[name[obs_to_alt] equal[==] constant[0]]]]] assign[=] name[null_value]
if compare[name[alt_name_dict] is constant[None]] begin[:]
variable[obs_to_var_names] assign[=] <ast.ListComp object at 0x7da1b15d3670>
if compare[name[col] in name[subset_specific_vars]] begin[:]
variable[relevant_alt_ids] assign[=] call[name[subset_specific_vars]][name[col]]
variable[relevant_col_idx] assign[=] call[call[name[np].where, parameter[call[name[np].in1d, parameter[name[all_alternatives], name[relevant_alt_ids]]]]]][constant[0]]
if compare[name[relevant_col_idx] is constant[None]] begin[:]
variable[obs_to_var_df] assign[=] call[name[pd].DataFrame, parameter[name[obs_to_var]]]
call[name[alt_specific_dfs].append, parameter[name[obs_to_var_df]]]
variable[final_alt_specific_df] assign[=] call[name[pd].concat, parameter[name[alt_specific_dfs]]]
variable[final_wide_df] assign[=] call[name[pd].concat, parameter[list[[<ast.Subscript object at 0x7da1b15f0790>, <ast.Subscript object at 0x7da1b15f20e0>, <ast.Name object at 0x7da1b15f3fd0>, <ast.Subscript object at 0x7da1b15f0340>, <ast.Name object at 0x7da1b15f0940>]]]]
if compare[name[final_wide_df].shape not_equal[!=] tuple[[<ast.Name object at 0x7da1b15f3a60>, <ast.Name object at 0x7da1b15f1e10>]]] begin[:]
variable[msg_1] assign[=] constant[There is an error with the dataframe that will be returned]
variable[msg_2] assign[=] call[constant[The shape of the dataframe should be {}].format, parameter[tuple[[<ast.Name object at 0x7da1b15f1de0>, <ast.Name object at 0x7da1b15f00a0>]]]]
variable[msg_3] assign[=] constant[Instead, the returned dataframe will have shape: {}]
variable[total_msg] assign[=] binary_operation[binary_operation[name[msg_1] + name[msg_2]] + call[name[msg_3].format, parameter[name[final_wide_df].shape]]]
call[name[warnings].warn, parameter[name[total_msg]]]
return[name[final_wide_df]] | keyword[def] identifier[convert_long_to_wide] ( identifier[long_data] ,
identifier[ind_vars] ,
identifier[alt_specific_vars] ,
identifier[subset_specific_vars] ,
identifier[obs_id_col] ,
identifier[alt_id_col] ,
identifier[choice_col] ,
identifier[alt_name_dict] = keyword[None] ,
identifier[null_value] = identifier[np] . identifier[nan] ):
literal[string]
identifier[num_vars_accounted_for] = identifier[sum] ([ identifier[len] ( identifier[x] ) keyword[for] identifier[x] keyword[in]
[ identifier[ind_vars] , identifier[alt_specific_vars] ,
identifier[subset_specific_vars] ,
[ identifier[obs_id_col] , identifier[alt_id_col] , identifier[choice_col] ]]])
identifier[ensure_all_columns_are_used] ( identifier[num_vars_accounted_for] , identifier[long_data] )
identifier[ensure_columns_are_in_dataframe] ( identifier[ind_vars] ,
identifier[long_data] ,
identifier[col_title] = literal[string] ,
identifier[data_title] = literal[string] )
identifier[ensure_columns_are_in_dataframe] ( identifier[alt_specific_vars] ,
identifier[long_data] ,
identifier[col_title] = literal[string] ,
identifier[data_title] = literal[string] )
identifier[ensure_columns_are_in_dataframe] ( identifier[subset_specific_vars] . identifier[keys] (),
identifier[long_data] ,
identifier[col_title] = literal[string] ,
identifier[data_title] = literal[string] )
identifier[identifying_cols] =[ identifier[choice_col] , identifier[obs_id_col] , identifier[alt_id_col] ]
identifier[identifying_col_string] = literal[string]
identifier[ensure_columns_are_in_dataframe] ( identifier[identifying_cols] ,
identifier[long_data] ,
identifier[col_title] = identifier[identifying_col_string] ,
identifier[data_title] = literal[string] )
identifier[check_dataframe_for_duplicate_records] ( identifier[obs_id_col] , identifier[alt_id_col] , identifier[long_data] )
identifier[ensure_num_chosen_alts_equals_num_obs] ( identifier[obs_id_col] , identifier[choice_col] , identifier[long_data] )
keyword[if] identifier[alt_name_dict] keyword[is] keyword[not] keyword[None] :
identifier[check_type_and_values_of_alt_name_dict] ( identifier[alt_name_dict] ,
identifier[alt_id_col] ,
identifier[long_data] )
identifier[num_obs] = identifier[long_data] [ identifier[obs_id_col] ]. identifier[unique] (). identifier[shape] [ literal[int] ]
identifier[num_alts] = identifier[long_data] [ identifier[alt_id_col] ]. identifier[unique] (). identifier[shape] [ literal[int] ]
identifier[num_cols] = literal[int]
identifier[num_cols] += identifier[num_alts]
identifier[num_cols] += literal[int]
identifier[num_cols] += identifier[len] ( identifier[ind_vars] )
identifier[num_cols] += identifier[len] ( identifier[alt_specific_vars] )* identifier[num_alts]
keyword[for] identifier[col] keyword[in] identifier[subset_specific_vars] :
identifier[num_cols] += identifier[len] ( identifier[subset_specific_vars] [ identifier[col] ])
identifier[new_df] = identifier[long_data] [[ identifier[obs_id_col] ]+ identifier[ind_vars] ]. identifier[drop_duplicates] ()
identifier[new_df] . identifier[reset_index] ( identifier[inplace] = keyword[True] )
identifier[new_df] [ identifier[choice_col] ]= identifier[long_data] . identifier[loc] [ identifier[long_data] [ identifier[choice_col] ]== literal[int] ,
identifier[alt_id_col] ]. identifier[values]
identifier[mapping_res] = identifier[create_long_form_mappings] ( identifier[long_data] ,
identifier[obs_id_col] ,
identifier[alt_id_col] )
identifier[row_to_obs] = identifier[mapping_res] [ literal[string] ]
identifier[row_to_alt] = identifier[mapping_res] [ literal[string] ]
identifier[obs_to_alt] = identifier[row_to_obs] . identifier[T] . identifier[dot] ( identifier[row_to_alt] ). identifier[todense] ()
identifier[alt_id_values] = identifier[long_data] [ identifier[alt_id_col] ]. identifier[values]
identifier[all_alternatives] = identifier[np] . identifier[sort] ( identifier[np] . identifier[unique] ( identifier[alt_id_values] ))
keyword[if] identifier[alt_name_dict] keyword[is] keyword[None] :
identifier[availability_col_names] =[ literal[string] . identifier[format] ( identifier[int] ( identifier[x] ))
keyword[for] identifier[x] keyword[in] identifier[all_alternatives] ]
keyword[else] :
identifier[availability_col_names] =[ literal[string] . identifier[format] ( identifier[alt_name_dict] [ identifier[x] ])
keyword[for] identifier[x] keyword[in] identifier[all_alternatives] ]
identifier[availability_df] = identifier[pd] . identifier[DataFrame] ( identifier[obs_to_alt] ,
identifier[columns] = identifier[availability_col_names] )
identifier[alt_specific_dfs] =[]
keyword[for] identifier[col] keyword[in] identifier[alt_specific_vars] + identifier[list] ( identifier[subset_specific_vars] . identifier[keys] ()):
identifier[relevant_vals] = identifier[long_data] [ identifier[col] ]. identifier[values] [:, keyword[None] ]
identifier[obs_to_var] = identifier[row_to_obs] . identifier[T] . identifier[dot] ( identifier[row_to_alt] . identifier[multiply] ( identifier[relevant_vals] ))
keyword[if] identifier[issparse] ( identifier[obs_to_var] ):
identifier[obs_to_var] = identifier[obs_to_var] . identifier[toarray] ()
identifier[obs_to_var] = identifier[obs_to_var] . identifier[astype] ( identifier[float] )
keyword[if] ( identifier[obs_to_alt] == literal[int] ). identifier[any] ():
identifier[obs_to_var] [ identifier[np] . identifier[where] ( identifier[obs_to_alt] == literal[int] )]= identifier[null_value]
keyword[if] identifier[alt_name_dict] keyword[is] keyword[None] :
identifier[obs_to_var_names] =[ literal[string] . identifier[format] ( identifier[col] , identifier[int] ( identifier[x] ))
keyword[for] identifier[x] keyword[in] identifier[all_alternatives] ]
keyword[else] :
identifier[obs_to_var_names] =[ literal[string] . identifier[format] ( identifier[col] , identifier[alt_name_dict] [ identifier[x] ])
keyword[for] identifier[x] keyword[in] identifier[all_alternatives] ]
keyword[if] identifier[col] keyword[in] identifier[subset_specific_vars] :
identifier[relevant_alt_ids] = identifier[subset_specific_vars] [ identifier[col] ]
identifier[relevant_col_idx] = identifier[np] . identifier[where] ( identifier[np] . identifier[in1d] ( identifier[all_alternatives] ,
identifier[relevant_alt_ids] ))[ literal[int] ]
keyword[else] :
identifier[relevant_col_idx] = keyword[None]
keyword[if] identifier[relevant_col_idx] keyword[is] keyword[None] :
identifier[obs_to_var_df] = identifier[pd] . identifier[DataFrame] ( identifier[obs_to_var] ,
identifier[columns] = identifier[obs_to_var_names] )
keyword[else] :
identifier[obs_to_var_df] = identifier[pd] . identifier[DataFrame] ( identifier[obs_to_var] [:, identifier[relevant_col_idx] ],
identifier[columns] =[ identifier[obs_to_var_names] [ identifier[x] ] keyword[for]
identifier[x] keyword[in] identifier[relevant_col_idx] ])
identifier[alt_specific_dfs] . identifier[append] ( identifier[obs_to_var_df] )
identifier[final_alt_specific_df] = identifier[pd] . identifier[concat] ( identifier[alt_specific_dfs] , identifier[axis] = literal[int] )
identifier[final_wide_df] = identifier[pd] . identifier[concat] ([ identifier[new_df] [[ identifier[obs_id_col] ]],
identifier[new_df] [[ identifier[choice_col] ]],
identifier[availability_df] ,
identifier[new_df] [ identifier[ind_vars] ],
identifier[final_alt_specific_df] ],
identifier[axis] = literal[int] )
keyword[if] identifier[final_wide_df] . identifier[shape] !=( identifier[num_obs] , identifier[num_cols] ):
identifier[msg_1] = literal[string]
identifier[msg_2] = literal[string] . identifier[format] (( identifier[num_obs] ,
identifier[num_cols] ))
identifier[msg_3] = literal[string]
identifier[total_msg] = identifier[msg_1] + identifier[msg_2] + identifier[msg_3] . identifier[format] ( identifier[final_wide_df] . identifier[shape] )
identifier[warnings] . identifier[warn] ( identifier[total_msg] )
keyword[return] identifier[final_wide_df] | def convert_long_to_wide(long_data, ind_vars, alt_specific_vars, subset_specific_vars, obs_id_col, alt_id_col, choice_col, alt_name_dict=None, null_value=np.nan):
"""
Converts a 'long format' dataframe of cross-sectional discrete choice data
into a 'wide format' version of the same data.
Parameters
----------
long_data : pandas dataframe.
Contains one row for each available alternative for each observation.
Should have the specified `[obs_id_col, alt_id_col, choice_col]` column
headings. The dtypes of all columns should be numeric.
ind_vars : list of strings.
Each element should be a column heading in `long_data` that denotes a
variable that varies across observations but not across alternatives.
alt_specific_vars : list of strings.
Each element should be a column heading in `long_data` that denotes a
variable that varies not only across observations but also across all
alternatives.
subset_specific_vars : dict.
Each key should be a string that is a column heading of `long_data`.
Each value should be a list of alternative ids denoting the subset of
alternatives which the variable (i.e. the key) over actually varies.
These variables should vary across individuals and across some
alternatives.
obs_id_col : str.
Denotes the column in `long_data` that contains the observation ID
values for each row.
alt_id_col : str.
Denotes the column in `long_data` that contains the alternative ID
values for each row.
choice_col : str.
Denotes the column in `long_data` that contains a one if the
alternative pertaining to the given row was the observed outcome for
the observation pertaining to the given row and a zero otherwise.
alt_name_dict : dict or None, optional
If not None, should be a dictionary whose keys are the possible values
in `long_data[alt_id_col].unique()`. The values should be the name
that one wants to associate with each alternative id. Default == None.
null_value : int, float, long, or `np.nan`, optional.
The passed value will be used to fill cells in the wide format
dataframe when that cell is unknown for a given individual. This is
most commonly the case when there is a variable that varies across
alternatives and one of the alternatives is not available for a given
indvidual. The `null_value` will be inserted for that individual for
that variable. Default == `np.nan`.
Returns
-------
final_wide_df : pandas dataframe.
Will contain one row per observational unit. Will contain an
observation id column of the same name as `obs_id_col`. Will also
contain a choice column of the same name as `choice_col`. Will contain
one availability column per unique, observed alternative in the
dataset. Will contain one column per variable in `ind_vars`. Will
contain one column per alternative per variable in `alt_specific_vars`.
Will contain one column per specified alternative per variable in
`subset_specific_vars`.
"""
##########
# Check that all columns of long_data are being
# used in the conversion to wide format
##########
num_vars_accounted_for = sum([len(x) for x in [ind_vars, alt_specific_vars, subset_specific_vars, [obs_id_col, alt_id_col, choice_col]]])
ensure_all_columns_are_used(num_vars_accounted_for, long_data)
##########
# Check that all columns one wishes to use are actually in long_data
##########
ensure_columns_are_in_dataframe(ind_vars, long_data, col_title='ind_vars', data_title='long_data')
ensure_columns_are_in_dataframe(alt_specific_vars, long_data, col_title='alt_specific_vars', data_title='long_data')
ensure_columns_are_in_dataframe(subset_specific_vars.keys(), long_data, col_title='subset_specific_vars', data_title='long_data')
identifying_cols = [choice_col, obs_id_col, alt_id_col]
identifying_col_string = '[choice_col, obs_id_col, alt_id_col]'
ensure_columns_are_in_dataframe(identifying_cols, long_data, col_title=identifying_col_string, data_title='long_data')
##########
# Make sure that each observation-alternative pair is unique
##########
check_dataframe_for_duplicate_records(obs_id_col, alt_id_col, long_data)
##########
# Make sure each observation chose an alternative that's available.
##########
# Make sure that the number of chosen alternatives equals the number of
# individuals.
ensure_num_chosen_alts_equals_num_obs(obs_id_col, choice_col, long_data)
##########
# Check that the alternative ids in the alt_name_dict are actually the
# alternative ids used in the long_data alt_id column.
##########
if alt_name_dict is not None:
check_type_and_values_of_alt_name_dict(alt_name_dict, alt_id_col, long_data) # depends on [control=['if'], data=['alt_name_dict']]
##########
# Figure out how many rows/columns should be in the wide format dataframe
##########
# Note that the number of rows in wide format is the number of observations
num_obs = long_data[obs_id_col].unique().shape[0]
# Figure out the total number of possible alternatives for the dataset
num_alts = long_data[alt_id_col].unique().shape[0]
############
# Calculate the needed number of colums
############
# For each observation, there is at least one column-- the observation id,
num_cols = 1
# We should have one availability column per alternative in the dataset
num_cols += num_alts
# We should also have one column to record the choice of each observation
num_cols += 1
# We should also have one column for each individual specific variable
num_cols += len(ind_vars)
# We should also have one column for each alternative specific variable,
# for each alternative
num_cols += len(alt_specific_vars) * num_alts
# We should have one column for each subset alternative specific variable
# for each alternative over which the variable varies
for col in subset_specific_vars:
num_cols += len(subset_specific_vars[col]) # depends on [control=['for'], data=['col']]
##########
# Create the columns of the new dataframe
##########
#####
# Create the individual specific variable columns,
# along with the observation id column
#####
new_df = long_data[[obs_id_col] + ind_vars].drop_duplicates()
# Reset the index so that the index is not based on long_data
new_df.reset_index(inplace=True)
#####
# Create the choice column in the wide data format
#####
new_df[choice_col] = long_data.loc[long_data[choice_col] == 1, alt_id_col].values
#####
# Create the availability columns
#####
# Get the various long form mapping matrices
mapping_res = create_long_form_mappings(long_data, obs_id_col, alt_id_col)
row_to_obs = mapping_res['rows_to_obs']
row_to_alt = mapping_res['rows_to_alts']
# Get the matrix of observations (rows) to available alternatives (columns)
obs_to_alt = row_to_obs.T.dot(row_to_alt).todense()
# Determine the unique alternative IDs in the order used in obs_to_alt
alt_id_values = long_data[alt_id_col].values
all_alternatives = np.sort(np.unique(alt_id_values))
# Create the names for the availability columns
if alt_name_dict is None:
availability_col_names = ['availability_{}'.format(int(x)) for x in all_alternatives] # depends on [control=['if'], data=[]]
else:
availability_col_names = ['availability_{}'.format(alt_name_dict[x]) for x in all_alternatives]
# Create a dataframe containing the availability columns for this dataset
availability_df = pd.DataFrame(obs_to_alt, columns=availability_col_names)
#####
# Create the alternative specific and subset
# alternative specific variable columns
#####
# For each alternative specific variable, create a wide format dataframe
alt_specific_dfs = []
for col in alt_specific_vars + list(subset_specific_vars.keys()):
# Get the relevant values from the long format dataframe
relevant_vals = long_data[col].values[:, None]
# Create an wide format array of the relevant values
obs_to_var = row_to_obs.T.dot(row_to_alt.multiply(relevant_vals))
# Ensure that the wide format array is an ndarray with of dtype float
if issparse(obs_to_var):
obs_to_var = obs_to_var.toarray() # depends on [control=['if'], data=[]]
# Ensure that obs_to_var has a float dtype
obs_to_var = obs_to_var.astype(float)
# Place a null value in columns where the alternative is not available
# to a given observation
if (obs_to_alt == 0).any():
obs_to_var[np.where(obs_to_alt == 0)] = null_value # depends on [control=['if'], data=[]]
# Create column names for the alternative specific variable columns
if alt_name_dict is None:
obs_to_var_names = ['{}_{}'.format(col, int(x)) for x in all_alternatives] # depends on [control=['if'], data=[]]
else:
obs_to_var_names = ['{}_{}'.format(col, alt_name_dict[x]) for x in all_alternatives]
# Subset obs_to_vars and obs_to_var_names if col is in
# subset_specific_vars
if col in subset_specific_vars:
# Calculate the relevant column indices for
# the specified subset of alternatives
relevant_alt_ids = subset_specific_vars[col]
relevant_col_idx = np.where(np.in1d(all_alternatives, relevant_alt_ids))[0] # depends on [control=['if'], data=['col', 'subset_specific_vars']]
else:
relevant_col_idx = None
# Create a dataframe containing the alternative specific variables
# or the subset alternative specific variables for the given
# variable in the long format dataframe
if relevant_col_idx is None:
obs_to_var_df = pd.DataFrame(obs_to_var, columns=obs_to_var_names) # depends on [control=['if'], data=[]]
else:
obs_to_var_df = pd.DataFrame(obs_to_var[:, relevant_col_idx], columns=[obs_to_var_names[x] for x in relevant_col_idx])
# Store the current alternative specific variable columns/dataframe
alt_specific_dfs.append(obs_to_var_df) # depends on [control=['for'], data=['col']]
# Combine all of the various alternative specific variable dataframes
final_alt_specific_df = pd.concat(alt_specific_dfs, axis=1)
##########
# Construct the final wide format dataframe to be returned
##########
final_wide_df = pd.concat([new_df[[obs_id_col]], new_df[[choice_col]], availability_df, new_df[ind_vars], final_alt_specific_df], axis=1)
# Make sure one has the correct number of rows and columns in
# the final dataframe
if final_wide_df.shape != (num_obs, num_cols):
msg_1 = 'There is an error with the dataframe that will be returned'
msg_2 = 'The shape of the dataframe should be {}'.format((num_obs, num_cols))
msg_3 = 'Instead, the returned dataframe will have shape: {}'
total_msg = msg_1 + msg_2 + msg_3.format(final_wide_df.shape)
warnings.warn(total_msg) # depends on [control=['if'], data=[]]
# Return the wide format dataframe
return final_wide_df |
def compare_files(pyc_filename1, pyc_filename2, verify):
"""Compare two .pyc files."""
(version1, timestamp, magic_int1, code_obj1, is_pypy,
source_size) = uncompyle6.load_module(pyc_filename1)
(version2, timestamp, magic_int2, code_obj2, is_pypy,
source_size) = uncompyle6.load_module(pyc_filename2)
if (magic_int1 != magic_int2) and verify == 'verify':
verify = 'weak_verify'
cmp_code_objects(version1, is_pypy, code_obj1, code_obj2, verify) | def function[compare_files, parameter[pyc_filename1, pyc_filename2, verify]]:
constant[Compare two .pyc files.]
<ast.Tuple object at 0x7da1b1b139d0> assign[=] call[name[uncompyle6].load_module, parameter[name[pyc_filename1]]]
<ast.Tuple object at 0x7da1b1b121a0> assign[=] call[name[uncompyle6].load_module, parameter[name[pyc_filename2]]]
if <ast.BoolOp object at 0x7da1b1b106d0> begin[:]
variable[verify] assign[=] constant[weak_verify]
call[name[cmp_code_objects], parameter[name[version1], name[is_pypy], name[code_obj1], name[code_obj2], name[verify]]] | keyword[def] identifier[compare_files] ( identifier[pyc_filename1] , identifier[pyc_filename2] , identifier[verify] ):
literal[string]
( identifier[version1] , identifier[timestamp] , identifier[magic_int1] , identifier[code_obj1] , identifier[is_pypy] ,
identifier[source_size] )= identifier[uncompyle6] . identifier[load_module] ( identifier[pyc_filename1] )
( identifier[version2] , identifier[timestamp] , identifier[magic_int2] , identifier[code_obj2] , identifier[is_pypy] ,
identifier[source_size] )= identifier[uncompyle6] . identifier[load_module] ( identifier[pyc_filename2] )
keyword[if] ( identifier[magic_int1] != identifier[magic_int2] ) keyword[and] identifier[verify] == literal[string] :
identifier[verify] = literal[string]
identifier[cmp_code_objects] ( identifier[version1] , identifier[is_pypy] , identifier[code_obj1] , identifier[code_obj2] , identifier[verify] ) | def compare_files(pyc_filename1, pyc_filename2, verify):
"""Compare two .pyc files."""
(version1, timestamp, magic_int1, code_obj1, is_pypy, source_size) = uncompyle6.load_module(pyc_filename1)
(version2, timestamp, magic_int2, code_obj2, is_pypy, source_size) = uncompyle6.load_module(pyc_filename2)
if magic_int1 != magic_int2 and verify == 'verify':
verify = 'weak_verify' # depends on [control=['if'], data=[]]
cmp_code_objects(version1, is_pypy, code_obj1, code_obj2, verify) |
def recv(self, stream, crc_mode=1, retry=16, timeout=60, delay=1, quiet=0):
'''
Receive a stream via the XMODEM protocol.
>>> stream = file('/etc/issue', 'wb')
>>> print modem.recv(stream)
2342
Returns the number of bytes received on success or ``None`` in case of
failure.
'''
# initiate protocol
error_count = 0
char = 0
cancel = 0
while True:
# first try CRC mode, if this fails,
# fall back to checksum mode
if error_count >= retry:
self.abort(timeout=timeout)
return None
elif crc_mode and error_count < (retry / 2):
if not self.putc(CRC):
time.sleep(delay)
error_count += 1
else:
crc_mode = 0
if not self.putc(NAK):
time.sleep(delay)
error_count += 1
char = self.getc(1, timeout)
if not char:
error_count += 1
continue
elif char == SOH:
#crc_mode = 0
break
elif char == STX:
break
elif char == CAN:
if cancel:
return None
else:
cancel = 1
else:
error_count += 1
# read data
error_count = 0
income_size = 0
packet_size = 128
sequence = 1
cancel = 0
while True:
while True:
if char == SOH:
packet_size = 128
break
elif char == STX:
packet_size = 1024
break
elif char == EOT:
# We received an EOT, so send an ACK and return the received
# data length
self.putc(ACK)
return income_size
elif char == CAN:
# cancel at two consecutive cancels
if cancel:
return None
else:
cancel = 1
else:
if not quiet:
print >> sys.stderr, \
'recv ERROR expected SOH/EOT, got', ord(char)
error_count += 1
if error_count >= retry:
self.abort()
return None
# read sequence
error_count = 0
cancel = 0
seq1 = ord(self.getc(1))
seq2 = 0xff - ord(self.getc(1))
if seq1 == sequence and seq2 == sequence:
# sequence is ok, read packet
# packet_size + checksum
data = self.getc(packet_size + 1 + crc_mode, timeout)
if crc_mode:
csum = (ord(data[-2]) << 8) + ord(data[-1])
data = data[:-2]
log.debug('CRC (%04x <> %04x)' % \
(csum, self.calc_crc(data)))
valid = csum == self.calc_crc(data)
else:
csum = data[-1]
data = data[:-1]
log.debug('checksum (checksum(%02x <> %02x)' % \
(ord(csum), self.calc_checksum(data)))
valid = ord(csum) == self.calc_checksum(data)
# valid data, append chunk
if valid:
income_size += len(data)
stream.write(data)
self.putc(ACK)
sequence = (sequence + 1) % 0x100
char = self.getc(1, timeout)
continue
else:
# consume data
self.getc(packet_size + 1 + crc_mode)
self.debug('expecting sequence %d, got %d/%d' % \
(sequence, seq1, seq2))
# something went wrong, request retransmission
self.putc(NAK) | def function[recv, parameter[self, stream, crc_mode, retry, timeout, delay, quiet]]:
constant[
Receive a stream via the XMODEM protocol.
>>> stream = file('/etc/issue', 'wb')
>>> print modem.recv(stream)
2342
Returns the number of bytes received on success or ``None`` in case of
failure.
]
variable[error_count] assign[=] constant[0]
variable[char] assign[=] constant[0]
variable[cancel] assign[=] constant[0]
while constant[True] begin[:]
if compare[name[error_count] greater_or_equal[>=] name[retry]] begin[:]
call[name[self].abort, parameter[]]
return[constant[None]]
variable[char] assign[=] call[name[self].getc, parameter[constant[1], name[timeout]]]
if <ast.UnaryOp object at 0x7da2054a4340> begin[:]
<ast.AugAssign object at 0x7da2054a6290>
continue
variable[error_count] assign[=] constant[0]
variable[income_size] assign[=] constant[0]
variable[packet_size] assign[=] constant[128]
variable[sequence] assign[=] constant[1]
variable[cancel] assign[=] constant[0]
while constant[True] begin[:]
while constant[True] begin[:]
if compare[name[char] equal[==] name[SOH]] begin[:]
variable[packet_size] assign[=] constant[128]
break
variable[error_count] assign[=] constant[0]
variable[cancel] assign[=] constant[0]
variable[seq1] assign[=] call[name[ord], parameter[call[name[self].getc, parameter[constant[1]]]]]
variable[seq2] assign[=] binary_operation[constant[255] - call[name[ord], parameter[call[name[self].getc, parameter[constant[1]]]]]]
if <ast.BoolOp object at 0x7da2054a4d00> begin[:]
variable[data] assign[=] call[name[self].getc, parameter[binary_operation[binary_operation[name[packet_size] + constant[1]] + name[crc_mode]], name[timeout]]]
if name[crc_mode] begin[:]
variable[csum] assign[=] binary_operation[binary_operation[call[name[ord], parameter[call[name[data]][<ast.UnaryOp object at 0x7da2054a45b0>]]] <ast.LShift object at 0x7da2590d69e0> constant[8]] + call[name[ord], parameter[call[name[data]][<ast.UnaryOp object at 0x7da2054a4160>]]]]
variable[data] assign[=] call[name[data]][<ast.Slice object at 0x7da2054a5270>]
call[name[log].debug, parameter[binary_operation[constant[CRC (%04x <> %04x)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2054a62c0>, <ast.Call object at 0x7da2054a6680>]]]]]
variable[valid] assign[=] compare[name[csum] equal[==] call[name[self].calc_crc, parameter[name[data]]]]
if name[valid] begin[:]
<ast.AugAssign object at 0x7da18bccabf0>
call[name[stream].write, parameter[name[data]]]
call[name[self].putc, parameter[name[ACK]]]
variable[sequence] assign[=] binary_operation[binary_operation[name[sequence] + constant[1]] <ast.Mod object at 0x7da2590d6920> constant[256]]
variable[char] assign[=] call[name[self].getc, parameter[constant[1], name[timeout]]]
continue
call[name[self].putc, parameter[name[NAK]]] | keyword[def] identifier[recv] ( identifier[self] , identifier[stream] , identifier[crc_mode] = literal[int] , identifier[retry] = literal[int] , identifier[timeout] = literal[int] , identifier[delay] = literal[int] , identifier[quiet] = literal[int] ):
literal[string]
identifier[error_count] = literal[int]
identifier[char] = literal[int]
identifier[cancel] = literal[int]
keyword[while] keyword[True] :
keyword[if] identifier[error_count] >= identifier[retry] :
identifier[self] . identifier[abort] ( identifier[timeout] = identifier[timeout] )
keyword[return] keyword[None]
keyword[elif] identifier[crc_mode] keyword[and] identifier[error_count] <( identifier[retry] / literal[int] ):
keyword[if] keyword[not] identifier[self] . identifier[putc] ( identifier[CRC] ):
identifier[time] . identifier[sleep] ( identifier[delay] )
identifier[error_count] += literal[int]
keyword[else] :
identifier[crc_mode] = literal[int]
keyword[if] keyword[not] identifier[self] . identifier[putc] ( identifier[NAK] ):
identifier[time] . identifier[sleep] ( identifier[delay] )
identifier[error_count] += literal[int]
identifier[char] = identifier[self] . identifier[getc] ( literal[int] , identifier[timeout] )
keyword[if] keyword[not] identifier[char] :
identifier[error_count] += literal[int]
keyword[continue]
keyword[elif] identifier[char] == identifier[SOH] :
keyword[break]
keyword[elif] identifier[char] == identifier[STX] :
keyword[break]
keyword[elif] identifier[char] == identifier[CAN] :
keyword[if] identifier[cancel] :
keyword[return] keyword[None]
keyword[else] :
identifier[cancel] = literal[int]
keyword[else] :
identifier[error_count] += literal[int]
identifier[error_count] = literal[int]
identifier[income_size] = literal[int]
identifier[packet_size] = literal[int]
identifier[sequence] = literal[int]
identifier[cancel] = literal[int]
keyword[while] keyword[True] :
keyword[while] keyword[True] :
keyword[if] identifier[char] == identifier[SOH] :
identifier[packet_size] = literal[int]
keyword[break]
keyword[elif] identifier[char] == identifier[STX] :
identifier[packet_size] = literal[int]
keyword[break]
keyword[elif] identifier[char] == identifier[EOT] :
identifier[self] . identifier[putc] ( identifier[ACK] )
keyword[return] identifier[income_size]
keyword[elif] identifier[char] == identifier[CAN] :
keyword[if] identifier[cancel] :
keyword[return] keyword[None]
keyword[else] :
identifier[cancel] = literal[int]
keyword[else] :
keyword[if] keyword[not] identifier[quiet] :
identifier[print] >> identifier[sys] . identifier[stderr] , literal[string] , identifier[ord] ( identifier[char] )
identifier[error_count] += literal[int]
keyword[if] identifier[error_count] >= identifier[retry] :
identifier[self] . identifier[abort] ()
keyword[return] keyword[None]
identifier[error_count] = literal[int]
identifier[cancel] = literal[int]
identifier[seq1] = identifier[ord] ( identifier[self] . identifier[getc] ( literal[int] ))
identifier[seq2] = literal[int] - identifier[ord] ( identifier[self] . identifier[getc] ( literal[int] ))
keyword[if] identifier[seq1] == identifier[sequence] keyword[and] identifier[seq2] == identifier[sequence] :
identifier[data] = identifier[self] . identifier[getc] ( identifier[packet_size] + literal[int] + identifier[crc_mode] , identifier[timeout] )
keyword[if] identifier[crc_mode] :
identifier[csum] =( identifier[ord] ( identifier[data] [- literal[int] ])<< literal[int] )+ identifier[ord] ( identifier[data] [- literal[int] ])
identifier[data] = identifier[data] [:- literal[int] ]
identifier[log] . identifier[debug] ( literal[string] %( identifier[csum] , identifier[self] . identifier[calc_crc] ( identifier[data] )))
identifier[valid] = identifier[csum] == identifier[self] . identifier[calc_crc] ( identifier[data] )
keyword[else] :
identifier[csum] = identifier[data] [- literal[int] ]
identifier[data] = identifier[data] [:- literal[int] ]
identifier[log] . identifier[debug] ( literal[string] %( identifier[ord] ( identifier[csum] ), identifier[self] . identifier[calc_checksum] ( identifier[data] )))
identifier[valid] = identifier[ord] ( identifier[csum] )== identifier[self] . identifier[calc_checksum] ( identifier[data] )
keyword[if] identifier[valid] :
identifier[income_size] += identifier[len] ( identifier[data] )
identifier[stream] . identifier[write] ( identifier[data] )
identifier[self] . identifier[putc] ( identifier[ACK] )
identifier[sequence] =( identifier[sequence] + literal[int] )% literal[int]
identifier[char] = identifier[self] . identifier[getc] ( literal[int] , identifier[timeout] )
keyword[continue]
keyword[else] :
identifier[self] . identifier[getc] ( identifier[packet_size] + literal[int] + identifier[crc_mode] )
identifier[self] . identifier[debug] ( literal[string] %( identifier[sequence] , identifier[seq1] , identifier[seq2] ))
identifier[self] . identifier[putc] ( identifier[NAK] ) | def recv(self, stream, crc_mode=1, retry=16, timeout=60, delay=1, quiet=0):
"""
Receive a stream via the XMODEM protocol.
>>> stream = file('/etc/issue', 'wb')
>>> print modem.recv(stream)
2342
Returns the number of bytes received on success or ``None`` in case of
failure.
"""
# initiate protocol
error_count = 0
char = 0
cancel = 0
while True:
# first try CRC mode, if this fails,
# fall back to checksum mode
if error_count >= retry:
self.abort(timeout=timeout)
return None # depends on [control=['if'], data=[]]
elif crc_mode and error_count < retry / 2:
if not self.putc(CRC):
time.sleep(delay)
error_count += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
crc_mode = 0
if not self.putc(NAK):
time.sleep(delay)
error_count += 1 # depends on [control=['if'], data=[]]
char = self.getc(1, timeout)
if not char:
error_count += 1
continue # depends on [control=['if'], data=[]]
elif char == SOH:
#crc_mode = 0
break # depends on [control=['if'], data=[]]
elif char == STX:
break # depends on [control=['if'], data=[]]
elif char == CAN:
if cancel:
return None # depends on [control=['if'], data=[]]
else:
cancel = 1 # depends on [control=['if'], data=[]]
else:
error_count += 1 # depends on [control=['while'], data=[]]
# read data
error_count = 0
income_size = 0
packet_size = 128
sequence = 1
cancel = 0
while True:
while True:
if char == SOH:
packet_size = 128
break # depends on [control=['if'], data=[]]
elif char == STX:
packet_size = 1024
break # depends on [control=['if'], data=[]]
elif char == EOT:
# We received an EOT, so send an ACK and return the received
# data length
self.putc(ACK)
return income_size # depends on [control=['if'], data=[]]
elif char == CAN:
# cancel at two consecutive cancels
if cancel:
return None # depends on [control=['if'], data=[]]
else:
cancel = 1 # depends on [control=['if'], data=[]]
else:
if not quiet:
(print >> sys.stderr, 'recv ERROR expected SOH/EOT, got', ord(char)) # depends on [control=['if'], data=[]]
error_count += 1
if error_count >= retry:
self.abort()
return None # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
# read sequence
error_count = 0
cancel = 0
seq1 = ord(self.getc(1))
seq2 = 255 - ord(self.getc(1))
if seq1 == sequence and seq2 == sequence:
# sequence is ok, read packet
# packet_size + checksum
data = self.getc(packet_size + 1 + crc_mode, timeout)
if crc_mode:
csum = (ord(data[-2]) << 8) + ord(data[-1])
data = data[:-2]
log.debug('CRC (%04x <> %04x)' % (csum, self.calc_crc(data)))
valid = csum == self.calc_crc(data) # depends on [control=['if'], data=[]]
else:
csum = data[-1]
data = data[:-1]
log.debug('checksum (checksum(%02x <> %02x)' % (ord(csum), self.calc_checksum(data)))
valid = ord(csum) == self.calc_checksum(data)
# valid data, append chunk
if valid:
income_size += len(data)
stream.write(data)
self.putc(ACK)
sequence = (sequence + 1) % 256
char = self.getc(1, timeout)
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# consume data
self.getc(packet_size + 1 + crc_mode)
self.debug('expecting sequence %d, got %d/%d' % (sequence, seq1, seq2))
# something went wrong, request retransmission
self.putc(NAK) # depends on [control=['while'], data=[]] |
def smooth(sig, window_size):
    """
    Smooth a signal with a uniform (boxcar) moving-average filter.

    Parameters
    ----------
    sig : numpy array
        Input signal to be smoothed.
    window_size : int
        Width of the averaging window, in samples.
    """
    # Uniform kernel whose taps sum to 1, so the output stays on the
    # same scale as the input.
    kernel = np.full(window_size, 1.0 / window_size)
    # mode='same' keeps the output the same length as the input.
    return np.convolve(sig, kernel, mode='same')
constant[
Apply a uniform moving average filter to a signal
Parameters
----------
sig : numpy array
The signal to smooth.
window_size : int
The width of the moving average filter.
]
variable[box] assign[=] binary_operation[call[name[np].ones, parameter[name[window_size]]] / name[window_size]]
return[call[name[np].convolve, parameter[name[sig], name[box]]]] | keyword[def] identifier[smooth] ( identifier[sig] , identifier[window_size] ):
literal[string]
identifier[box] = identifier[np] . identifier[ones] ( identifier[window_size] )/ identifier[window_size]
keyword[return] identifier[np] . identifier[convolve] ( identifier[sig] , identifier[box] , identifier[mode] = literal[string] ) | def smooth(sig, window_size):
"""
Apply a uniform moving average filter to a signal
Parameters
----------
sig : numpy array
The signal to smooth.
window_size : int
The width of the moving average filter.
"""
box = np.ones(window_size) / window_size
return np.convolve(sig, box, mode='same') |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.