code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def rest_del(self, url, params=None, auth=None, verify=True, cert=None,
             timeout=None):
    """
    Perform a DELETE request to ``url`` with optional authentication.

    :param url: Target URL.
    :param params: Optional query-string parameters (dict).
    :param auth: Optional authentication object/tuple passed to requests.
    :param verify: Whether to verify TLS certificates (default True).
    :param cert: Optional client certificate (path or ``(cert, key)`` tuple).
    :param timeout: Optional timeout in seconds (float or (connect, read)
        tuple). Without a timeout, requests can block indefinitely on an
        unresponsive server; default ``None`` preserves prior behavior.
    :return: Tuple of ``(response body text, HTTP status code)``.
    """
    res = requests.delete(url, params=params, auth=auth, verify=verify,
                          cert=cert, timeout=timeout)
    return res.text, res.status_code
constant[
Perform a DELETE request to url with optional authentication
]
variable[res] assign[=] call[name[requests].delete, parameter[name[url]]]
return[tuple[[<ast.Attribute object at 0x7da1b0f43880>, <ast.Attribute object at 0x7da1b0f416c0>]]] | keyword[def] identifier[rest_del] ( identifier[self] , identifier[url] , identifier[params] = keyword[None] , identifier[auth] = keyword[None] , identifier[verify] = keyword[True] , identifier[cert] = keyword[None] ):
literal[string]
identifier[res] = identifier[requests] . identifier[delete] ( identifier[url] , identifier[params] = identifier[params] , identifier[auth] = identifier[auth] , identifier[verify] = identifier[verify] , identifier[cert] = identifier[cert] )
keyword[return] identifier[res] . identifier[text] , identifier[res] . identifier[status_code] | def rest_del(self, url, params=None, auth=None, verify=True, cert=None):
"""
Perform a DELETE request to url with optional authentication
"""
res = requests.delete(url, params=params, auth=auth, verify=verify, cert=cert)
return (res.text, res.status_code) |
def labeled_intervals(intervals, labels, label_set=None,
                      base=None, height=None, extend_labels=True,
                      ax=None, tick=True, **kwargs):
    '''Plot labeled intervals with each label on its own row.
    Parameters
    ----------
    intervals : np.ndarray, shape=(n, 2)
        segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.
    labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    label_set : list
        An (ordered) list of labels to determine the plotting order.
        If not provided, the labels will be inferred from
        ``ax.get_yticklabels()``.
        If no ``yticklabels`` exist, then the sorted set of unique values
        in ``labels`` is taken as the label set.
    base : np.ndarray, shape=(n,), optional
        Vertical positions of each label.
        By default, labels are positioned at integers
        ``np.arange(len(labels))``.
    height : scalar or np.ndarray, shape=(n,), optional
        Height for each label.
        If scalar, the same value is applied to all labels.
        By default, each label has ``height=1``.
    extend_labels : bool
        If ``False``, only values of ``labels`` that also exist in
        ``label_set`` will be shown.
        If ``True``, all labels are shown, with those in `labels` but
        not in `label_set` appended to the top of the plot.
        A horizontal line is drawn to indicate the separation between
        values in or out of ``label_set``.
    ax : matplotlib.pyplot.axes
        An axis handle on which to draw the intervals.
        If none is provided, a new set of axes is created.
    tick : bool
        If ``True``, sets tick positions and labels on the y-axis.
    kwargs
        Additional keyword arguments to pass to
        `matplotlib.collection.BrokenBarHCollection`.
    Returns
    -------
    ax : matplotlib.pyplot.axes._subplots.AxesSubplot
        A handle to the (possibly constructed) plot axes
    '''
    # Get the axes handle
    ax, _ = __get_axes(ax=ax)
    # Make sure we have a numpy array
    intervals = np.atleast_2d(intervals)
    if label_set is None:
        # If we have non-empty pre-existing tick labels, use them
        label_set = [_.get_text() for _ in ax.get_yticklabels()]
        # If none of the label strings have content, treat it as empty
        if not any(label_set):
            label_set = []
    else:
        label_set = list(label_set)
    # Put additional labels at the end, in order
    if extend_labels:
        ticks = label_set + sorted(set(labels) - set(label_set))
    elif label_set:
        ticks = label_set
    else:
        ticks = sorted(set(labels))
    # Seed the bar style from the axes' color cycle.
    # NOTE(review): ``_get_patches_for_fill.prop_cycler`` is a private
    # matplotlib attribute and may break on newer matplotlib -- verify.
    style = dict(linewidth=1)
    style.update(next(ax._get_patches_for_fill.prop_cycler))
    # Swap color -> facecolor here so we preserve edgecolor on rects
    style['facecolor'] = style.pop('color')
    style.update(kwargs)
    if base is None:
        base = np.arange(len(ticks))
    if height is None:
        height = 1
    if np.isscalar(height):
        height = height * np.ones_like(base)
    # Map each label to its (vertical offset, bar height) row.
    seg_y = dict()
    for ybase, yheight, lab in zip(base, height, ticks):
        seg_y[lab] = (ybase, yheight)
    # Collect (start, duration) spans per label; intervals whose label has
    # no assigned row are skipped.
    xvals = defaultdict(list)
    for ival, lab in zip(intervals, labels):
        if lab not in seg_y:
            continue
        xvals[lab].append((ival[0], ival[1] - ival[0]))
    for lab in seg_y:
        ax.add_collection(BrokenBarHCollection(xvals[lab], seg_y[lab],
                                               **style))
        # Pop the label after the first time we see it, so we only get
        # one legend entry
        style.pop('label', None)
    # Draw a line separating the new labels from pre-existing labels
    if label_set != ticks:
        ax.axhline(len(label_set), color='k', alpha=0.5)
    if tick:
        ax.grid(True, axis='y')
        # Clear stale ticks before installing one tick per label row.
        ax.set_yticks([])
        ax.set_yticks(base)
        ax.set_yticklabels(ticks, va='bottom')
        # IntervalFormatter is defined elsewhere in this module; presumably
        # it maps y positions back to label strings -- confirm there.
        ax.yaxis.set_major_formatter(IntervalFormatter(base, ticks))
    # Grow axis limits to cover all rows (y) and the full time span (x).
    if base.size:
        __expand_limits(ax, [base.min(), (base + height).max()], which='y')
    if intervals.size:
        __expand_limits(ax, [intervals.min(), intervals.max()], which='x')
    return ax | def function[labeled_intervals, parameter[intervals, labels, label_set, base, height, extend_labels, ax, tick]]:
constant[Plot labeled intervals with each label on its own row.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
label_set : list
An (ordered) list of labels to determine the plotting order.
If not provided, the labels will be inferred from
``ax.get_yticklabels()``.
If no ``yticklabels`` exist, then the sorted set of unique values
in ``labels`` is taken as the label set.
base : np.ndarray, shape=(n,), optional
Vertical positions of each label.
By default, labels are positioned at integers
``np.arange(len(labels))``.
height : scalar or np.ndarray, shape=(n,), optional
Height for each label.
If scalar, the same value is applied to all labels.
By default, each label has ``height=1``.
extend_labels : bool
If ``False``, only values of ``labels`` that also exist in
``label_set`` will be shown.
If ``True``, all labels are shown, with those in `labels` but
not in `label_set` appended to the top of the plot.
A horizontal line is drawn to indicate the separation between
values in or out of ``label_set``.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the intervals.
If none is provided, a new set of axes is created.
tick : bool
If ``True``, sets tick positions and labels on the y-axis.
kwargs
Additional keyword arguments to pass to
`matplotlib.collection.BrokenBarHCollection`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
]
<ast.Tuple object at 0x7da2043454b0> assign[=] call[name[__get_axes], parameter[]]
variable[intervals] assign[=] call[name[np].atleast_2d, parameter[name[intervals]]]
if compare[name[label_set] is constant[None]] begin[:]
variable[label_set] assign[=] <ast.ListComp object at 0x7da204346560>
if <ast.UnaryOp object at 0x7da204346c80> begin[:]
variable[label_set] assign[=] list[[]]
if name[extend_labels] begin[:]
variable[ticks] assign[=] binary_operation[name[label_set] + call[name[sorted], parameter[binary_operation[call[name[set], parameter[name[labels]]] - call[name[set], parameter[name[label_set]]]]]]]
variable[style] assign[=] call[name[dict], parameter[]]
call[name[style].update, parameter[call[name[next], parameter[name[ax]._get_patches_for_fill.prop_cycler]]]]
call[name[style]][constant[facecolor]] assign[=] call[name[style].pop, parameter[constant[color]]]
call[name[style].update, parameter[name[kwargs]]]
if compare[name[base] is constant[None]] begin[:]
variable[base] assign[=] call[name[np].arange, parameter[call[name[len], parameter[name[ticks]]]]]
if compare[name[height] is constant[None]] begin[:]
variable[height] assign[=] constant[1]
if call[name[np].isscalar, parameter[name[height]]] begin[:]
variable[height] assign[=] binary_operation[name[height] * call[name[np].ones_like, parameter[name[base]]]]
variable[seg_y] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20c6c78b0>, <ast.Name object at 0x7da20c6c46d0>, <ast.Name object at 0x7da20c6c7790>]]] in starred[call[name[zip], parameter[name[base], name[height], name[ticks]]]] begin[:]
call[name[seg_y]][name[lab]] assign[=] tuple[[<ast.Name object at 0x7da20c6c6950>, <ast.Name object at 0x7da20c6c7a60>]]
variable[xvals] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[tuple[[<ast.Name object at 0x7da20c6c6a70>, <ast.Name object at 0x7da20c6c7460>]]] in starred[call[name[zip], parameter[name[intervals], name[labels]]]] begin[:]
if compare[name[lab] <ast.NotIn object at 0x7da2590d7190> name[seg_y]] begin[:]
continue
call[call[name[xvals]][name[lab]].append, parameter[tuple[[<ast.Subscript object at 0x7da20c6c4c40>, <ast.BinOp object at 0x7da20c6c5c90>]]]]
for taget[name[lab]] in starred[name[seg_y]] begin[:]
call[name[ax].add_collection, parameter[call[name[BrokenBarHCollection], parameter[call[name[xvals]][name[lab]], call[name[seg_y]][name[lab]]]]]]
call[name[style].pop, parameter[constant[label], constant[None]]]
if compare[name[label_set] not_equal[!=] name[ticks]] begin[:]
call[name[ax].axhline, parameter[call[name[len], parameter[name[label_set]]]]]
if name[tick] begin[:]
call[name[ax].grid, parameter[constant[True]]]
call[name[ax].set_yticks, parameter[list[[]]]]
call[name[ax].set_yticks, parameter[name[base]]]
call[name[ax].set_yticklabels, parameter[name[ticks]]]
call[name[ax].yaxis.set_major_formatter, parameter[call[name[IntervalFormatter], parameter[name[base], name[ticks]]]]]
if name[base].size begin[:]
call[name[__expand_limits], parameter[name[ax], list[[<ast.Call object at 0x7da20c6c49d0>, <ast.Call object at 0x7da20c6c69e0>]]]]
if name[intervals].size begin[:]
call[name[__expand_limits], parameter[name[ax], list[[<ast.Call object at 0x7da20c6c7d60>, <ast.Call object at 0x7da20c6c7ca0>]]]]
return[name[ax]] | keyword[def] identifier[labeled_intervals] ( identifier[intervals] , identifier[labels] , identifier[label_set] = keyword[None] ,
identifier[base] = keyword[None] , identifier[height] = keyword[None] , identifier[extend_labels] = keyword[True] ,
identifier[ax] = keyword[None] , identifier[tick] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[ax] , identifier[_] = identifier[__get_axes] ( identifier[ax] = identifier[ax] )
identifier[intervals] = identifier[np] . identifier[atleast_2d] ( identifier[intervals] )
keyword[if] identifier[label_set] keyword[is] keyword[None] :
identifier[label_set] =[ identifier[_] . identifier[get_text] () keyword[for] identifier[_] keyword[in] identifier[ax] . identifier[get_yticklabels] ()]
keyword[if] keyword[not] identifier[any] ( identifier[label_set] ):
identifier[label_set] =[]
keyword[else] :
identifier[label_set] = identifier[list] ( identifier[label_set] )
keyword[if] identifier[extend_labels] :
identifier[ticks] = identifier[label_set] + identifier[sorted] ( identifier[set] ( identifier[labels] )- identifier[set] ( identifier[label_set] ))
keyword[elif] identifier[label_set] :
identifier[ticks] = identifier[label_set]
keyword[else] :
identifier[ticks] = identifier[sorted] ( identifier[set] ( identifier[labels] ))
identifier[style] = identifier[dict] ( identifier[linewidth] = literal[int] )
identifier[style] . identifier[update] ( identifier[next] ( identifier[ax] . identifier[_get_patches_for_fill] . identifier[prop_cycler] ))
identifier[style] [ literal[string] ]= identifier[style] . identifier[pop] ( literal[string] )
identifier[style] . identifier[update] ( identifier[kwargs] )
keyword[if] identifier[base] keyword[is] keyword[None] :
identifier[base] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[ticks] ))
keyword[if] identifier[height] keyword[is] keyword[None] :
identifier[height] = literal[int]
keyword[if] identifier[np] . identifier[isscalar] ( identifier[height] ):
identifier[height] = identifier[height] * identifier[np] . identifier[ones_like] ( identifier[base] )
identifier[seg_y] = identifier[dict] ()
keyword[for] identifier[ybase] , identifier[yheight] , identifier[lab] keyword[in] identifier[zip] ( identifier[base] , identifier[height] , identifier[ticks] ):
identifier[seg_y] [ identifier[lab] ]=( identifier[ybase] , identifier[yheight] )
identifier[xvals] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[ival] , identifier[lab] keyword[in] identifier[zip] ( identifier[intervals] , identifier[labels] ):
keyword[if] identifier[lab] keyword[not] keyword[in] identifier[seg_y] :
keyword[continue]
identifier[xvals] [ identifier[lab] ]. identifier[append] (( identifier[ival] [ literal[int] ], identifier[ival] [ literal[int] ]- identifier[ival] [ literal[int] ]))
keyword[for] identifier[lab] keyword[in] identifier[seg_y] :
identifier[ax] . identifier[add_collection] ( identifier[BrokenBarHCollection] ( identifier[xvals] [ identifier[lab] ], identifier[seg_y] [ identifier[lab] ],
** identifier[style] ))
identifier[style] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[label_set] != identifier[ticks] :
identifier[ax] . identifier[axhline] ( identifier[len] ( identifier[label_set] ), identifier[color] = literal[string] , identifier[alpha] = literal[int] )
keyword[if] identifier[tick] :
identifier[ax] . identifier[grid] ( keyword[True] , identifier[axis] = literal[string] )
identifier[ax] . identifier[set_yticks] ([])
identifier[ax] . identifier[set_yticks] ( identifier[base] )
identifier[ax] . identifier[set_yticklabels] ( identifier[ticks] , identifier[va] = literal[string] )
identifier[ax] . identifier[yaxis] . identifier[set_major_formatter] ( identifier[IntervalFormatter] ( identifier[base] , identifier[ticks] ))
keyword[if] identifier[base] . identifier[size] :
identifier[__expand_limits] ( identifier[ax] ,[ identifier[base] . identifier[min] (),( identifier[base] + identifier[height] ). identifier[max] ()], identifier[which] = literal[string] )
keyword[if] identifier[intervals] . identifier[size] :
identifier[__expand_limits] ( identifier[ax] ,[ identifier[intervals] . identifier[min] (), identifier[intervals] . identifier[max] ()], identifier[which] = literal[string] )
keyword[return] identifier[ax] | def labeled_intervals(intervals, labels, label_set=None, base=None, height=None, extend_labels=True, ax=None, tick=True, **kwargs):
"""Plot labeled intervals with each label on its own row.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
label_set : list
An (ordered) list of labels to determine the plotting order.
If not provided, the labels will be inferred from
``ax.get_yticklabels()``.
If no ``yticklabels`` exist, then the sorted set of unique values
in ``labels`` is taken as the label set.
base : np.ndarray, shape=(n,), optional
Vertical positions of each label.
By default, labels are positioned at integers
``np.arange(len(labels))``.
height : scalar or np.ndarray, shape=(n,), optional
Height for each label.
If scalar, the same value is applied to all labels.
By default, each label has ``height=1``.
extend_labels : bool
If ``False``, only values of ``labels`` that also exist in
``label_set`` will be shown.
If ``True``, all labels are shown, with those in `labels` but
not in `label_set` appended to the top of the plot.
A horizontal line is drawn to indicate the separation between
values in or out of ``label_set``.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the intervals.
If none is provided, a new set of axes is created.
tick : bool
If ``True``, sets tick positions and labels on the y-axis.
kwargs
Additional keyword arguments to pass to
`matplotlib.collection.BrokenBarHCollection`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
"""
# Get the axes handle
(ax, _) = __get_axes(ax=ax)
# Make sure we have a numpy array
intervals = np.atleast_2d(intervals)
if label_set is None:
# If we have non-empty pre-existing tick labels, use them
label_set = [_.get_text() for _ in ax.get_yticklabels()]
# If none of the label strings have content, treat it as empty
if not any(label_set):
label_set = [] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['label_set']]
else:
label_set = list(label_set)
# Put additional labels at the end, in order
if extend_labels:
ticks = label_set + sorted(set(labels) - set(label_set)) # depends on [control=['if'], data=[]]
elif label_set:
ticks = label_set # depends on [control=['if'], data=[]]
else:
ticks = sorted(set(labels))
style = dict(linewidth=1)
style.update(next(ax._get_patches_for_fill.prop_cycler))
# Swap color -> facecolor here so we preserve edgecolor on rects
style['facecolor'] = style.pop('color')
style.update(kwargs)
if base is None:
base = np.arange(len(ticks)) # depends on [control=['if'], data=['base']]
if height is None:
height = 1 # depends on [control=['if'], data=['height']]
if np.isscalar(height):
height = height * np.ones_like(base) # depends on [control=['if'], data=[]]
seg_y = dict()
for (ybase, yheight, lab) in zip(base, height, ticks):
seg_y[lab] = (ybase, yheight) # depends on [control=['for'], data=[]]
xvals = defaultdict(list)
for (ival, lab) in zip(intervals, labels):
if lab not in seg_y:
continue # depends on [control=['if'], data=[]]
xvals[lab].append((ival[0], ival[1] - ival[0])) # depends on [control=['for'], data=[]]
for lab in seg_y:
ax.add_collection(BrokenBarHCollection(xvals[lab], seg_y[lab], **style))
# Pop the label after the first time we see it, so we only get
# one legend entry
style.pop('label', None) # depends on [control=['for'], data=['lab']]
# Draw a line separating the new labels from pre-existing labels
if label_set != ticks:
ax.axhline(len(label_set), color='k', alpha=0.5) # depends on [control=['if'], data=['label_set']]
if tick:
ax.grid(True, axis='y')
ax.set_yticks([])
ax.set_yticks(base)
ax.set_yticklabels(ticks, va='bottom')
ax.yaxis.set_major_formatter(IntervalFormatter(base, ticks)) # depends on [control=['if'], data=[]]
if base.size:
__expand_limits(ax, [base.min(), (base + height).max()], which='y') # depends on [control=['if'], data=[]]
if intervals.size:
__expand_limits(ax, [intervals.min(), intervals.max()], which='x') # depends on [control=['if'], data=[]]
return ax |
def is_broken_link(url):
    """Return True when fetching ``url`` fails (e.g. a 404 error)."""
    req = urllib_request.Request(
        url, headers={'User-Agent': 'Mozilla/5.0'})
    try:
        # Read the body to force the full request/response round trip.
        urllib_request.urlopen(req).read()
    except (urllib_request.URLError, SocketError):
        return True
    return False
constant[Determine whether the link returns a 404 error.]
<ast.Try object at 0x7da1b0868f40> | keyword[def] identifier[is_broken_link] ( identifier[url] ):
literal[string]
keyword[try] :
identifier[request] = identifier[urllib_request] . identifier[Request] (
identifier[url] , identifier[headers] ={ literal[string] : literal[string] })
identifier[urllib_request] . identifier[urlopen] ( identifier[request] ). identifier[read] ()
keyword[return] keyword[False]
keyword[except] identifier[urllib_request] . identifier[URLError] :
keyword[return] keyword[True]
keyword[except] identifier[SocketError] :
keyword[return] keyword[True] | def is_broken_link(url):
"""Determine whether the link returns a 404 error."""
try:
request = urllib_request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
urllib_request.urlopen(request).read()
return False # depends on [control=['try'], data=[]]
except urllib_request.URLError:
return True # depends on [control=['except'], data=[]]
except SocketError:
return True # depends on [control=['except'], data=[]] |
def info(vm, info_type='all', key='uuid'):
    '''
    Lookup info on running kvm
    vm : string
        vm to be targeted
    info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc]
        info type to return
    key : string [uuid|alias|hostname]
        value type of 'vm' parameter
    CLI Example:
    .. code-block:: bash
        salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543
        salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc
        salt '*' vmadm.info nacl key=alias
        salt '*' vmadm.info nacl vnc key=alias
    '''
    valid_info_types = ('all', 'block', 'blockstats', 'chardev', 'cpus',
                        'kvm', 'pci', 'spice', 'version', 'vnc')
    if info_type not in valid_info_types:
        return {'Error': 'Requested info_type is not available'}
    if key not in ('uuid', 'alias', 'hostname'):
        return {'Error': 'Key must be either uuid, alias or hostname'}
    # Resolve the supplied identifier (uuid/alias/hostname) to a uuid.
    vm = lookup('{0}={1}'.format(key, vm), one=True)
    if 'Error' in vm:
        return vm
    # vmadm info <uuid> [type,...]
    res = __salt__['cmd.run_all'](
        'vmadm info {uuid} {type}'.format(uuid=vm, type=info_type)
    )
    retcode = res['retcode']
    if retcode != 0:
        error = res['stderr'] if 'stderr' in res else _exit_status(retcode)
        return {'Error': error}
    return salt.utils.json.loads(res['stdout'])
constant[
Lookup info on running kvm
vm : string
vm to be targeted
info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc]
info type to return
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc
salt '*' vmadm.info nacl key=alias
salt '*' vmadm.info nacl vnc key=alias
]
variable[ret] assign[=] dictionary[[], []]
if compare[name[info_type] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18dc984f0>, <ast.Constant object at 0x7da18dc994b0>, <ast.Constant object at 0x7da18dc9b490>, <ast.Constant object at 0x7da18dc997e0>, <ast.Constant object at 0x7da18dc9b670>, <ast.Constant object at 0x7da18dc9aec0>, <ast.Constant object at 0x7da18dc9a230>, <ast.Constant object at 0x7da18dc9a620>, <ast.Constant object at 0x7da18dc99d20>, <ast.Constant object at 0x7da18dc99ae0>]]] begin[:]
call[name[ret]][constant[Error]] assign[=] constant[Requested info_type is not available]
return[name[ret]]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da2044c29e0>, <ast.Constant object at 0x7da2044c39a0>, <ast.Constant object at 0x7da2044c16c0>]]] begin[:]
call[name[ret]][constant[Error]] assign[=] constant[Key must be either uuid, alias or hostname]
return[name[ret]]
variable[vm] assign[=] call[name[lookup], parameter[call[constant[{0}={1}].format, parameter[name[key], name[vm]]]]]
if compare[constant[Error] in name[vm]] begin[:]
return[name[vm]]
variable[cmd] assign[=] call[constant[vmadm info {uuid} {type}].format, parameter[]]
variable[res] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]
variable[retcode] assign[=] call[name[res]][constant[retcode]]
if compare[name[retcode] not_equal[!=] constant[0]] begin[:]
call[name[ret]][constant[Error]] assign[=] <ast.IfExp object at 0x7da2044c3100>
return[name[ret]]
return[call[name[salt].utils.json.loads, parameter[call[name[res]][constant[stdout]]]]] | keyword[def] identifier[info] ( identifier[vm] , identifier[info_type] = literal[string] , identifier[key] = literal[string] ):
literal[string]
identifier[ret] ={}
keyword[if] identifier[info_type] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[if] identifier[key] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[vm] = identifier[lookup] ( literal[string] . identifier[format] ( identifier[key] , identifier[vm] ), identifier[one] = keyword[True] )
keyword[if] literal[string] keyword[in] identifier[vm] :
keyword[return] identifier[vm]
identifier[cmd] = literal[string] . identifier[format] (
identifier[uuid] = identifier[vm] ,
identifier[type] = identifier[info_type]
)
identifier[res] = identifier[__salt__] [ literal[string] ]( identifier[cmd] )
identifier[retcode] = identifier[res] [ literal[string] ]
keyword[if] identifier[retcode] != literal[int] :
identifier[ret] [ literal[string] ]= identifier[res] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[res] keyword[else] identifier[_exit_status] ( identifier[retcode] )
keyword[return] identifier[ret]
keyword[return] identifier[salt] . identifier[utils] . identifier[json] . identifier[loads] ( identifier[res] [ literal[string] ]) | def info(vm, info_type='all', key='uuid'):
"""
Lookup info on running kvm
vm : string
vm to be targeted
info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc]
info type to return
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc
salt '*' vmadm.info nacl key=alias
salt '*' vmadm.info nacl vnc key=alias
"""
ret = {}
if info_type not in ['all', 'block', 'blockstats', 'chardev', 'cpus', 'kvm', 'pci', 'spice', 'version', 'vnc']:
ret['Error'] = 'Requested info_type is not available'
return ret # depends on [control=['if'], data=[]]
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret # depends on [control=['if'], data=[]]
vm = lookup('{0}={1}'.format(key, vm), one=True)
if 'Error' in vm:
return vm # depends on [control=['if'], data=['vm']]
# vmadm info <uuid> [type,...]
cmd = 'vmadm info {uuid} {type}'.format(uuid=vm, type=info_type)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
return ret # depends on [control=['if'], data=['retcode']]
return salt.utils.json.loads(res['stdout']) |
def url_mod(url: str, new_params: dict) -> str:
    """
    Return ``url`` with the given query-string parameters set or overridden.

    Note: Does not support multiple querystring parameters with identical name.
    :param url: Base URL/path to modify
    :param new_params: Querystring parameters to set/override (dict)
    :return: New URL/path
    """
    from urllib.parse import urlparse, parse_qsl, urlunparse, urlencode
    parsed = urlparse(url)
    # Existing parameters come first; new ones override on key collision.
    merged = dict(parse_qsl(parsed.query))
    merged.update(
        {str(k): '' if v is None else str(v) for k, v in new_params.items()}
    )
    pieces = list(parsed)
    pieces[4] = urlencode(merged)  # index 4 is the query component
    return urlunparse(pieces)
constant[
Modifies existing URL by setting/overriding specified query string parameters.
Note: Does not support multiple querystring parameters with identical name.
:param url: Base URL/path to modify
:param new_params: Querystring parameters to set/override (dict)
:return: New URL/path
]
from relative_module[urllib.parse] import module[urlparse], module[parse_qsl], module[urlunparse], module[urlencode]
variable[res] assign[=] call[name[urlparse], parameter[name[url]]]
variable[query_params] assign[=] call[name[dict], parameter[call[name[parse_qsl], parameter[name[res].query]]]]
for taget[tuple[[<ast.Name object at 0x7da1b10c2bf0>, <ast.Name object at 0x7da1b10c3340>]]] in starred[call[name[new_params].items, parameter[]]] begin[:]
if compare[name[v] is constant[None]] begin[:]
call[name[query_params]][call[name[str], parameter[name[k]]]] assign[=] constant[]
variable[parts] assign[=] call[name[list], parameter[name[res]]]
call[name[parts]][constant[4]] assign[=] call[name[urlencode], parameter[name[query_params]]]
return[call[name[urlunparse], parameter[name[parts]]]] | keyword[def] identifier[url_mod] ( identifier[url] : identifier[str] , identifier[new_params] : identifier[dict] )-> identifier[str] :
literal[string]
keyword[from] identifier[urllib] . identifier[parse] keyword[import] identifier[urlparse] , identifier[parse_qsl] , identifier[urlunparse] , identifier[urlencode]
identifier[res] = identifier[urlparse] ( identifier[url] )
identifier[query_params] = identifier[dict] ( identifier[parse_qsl] ( identifier[res] . identifier[query] ))
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[new_params] . identifier[items] ():
keyword[if] identifier[v] keyword[is] keyword[None] :
identifier[query_params] [ identifier[str] ( identifier[k] )]= literal[string]
keyword[else] :
identifier[query_params] [ identifier[str] ( identifier[k] )]= identifier[str] ( identifier[v] )
identifier[parts] = identifier[list] ( identifier[res] )
identifier[parts] [ literal[int] ]= identifier[urlencode] ( identifier[query_params] )
keyword[return] identifier[urlunparse] ( identifier[parts] ) | def url_mod(url: str, new_params: dict) -> str:
"""
Modifies existing URL by setting/overriding specified query string parameters.
Note: Does not support multiple querystring parameters with identical name.
:param url: Base URL/path to modify
:param new_params: Querystring parameters to set/override (dict)
:return: New URL/path
"""
from urllib.parse import urlparse, parse_qsl, urlunparse, urlencode
res = urlparse(url)
query_params = dict(parse_qsl(res.query))
for (k, v) in new_params.items():
if v is None:
query_params[str(k)] = '' # depends on [control=['if'], data=[]]
else:
query_params[str(k)] = str(v) # depends on [control=['for'], data=[]]
parts = list(res)
parts[4] = urlencode(query_params)
return urlunparse(parts) |
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
    """Delete an existing record.
    If the record doesn't exist, does nothing.
    """
    if identifier:
        record_ids = [identifier]
    else:
        # No identifier given: resolve matching records by type/name/content.
        record_ids = [
            entry["id"] for entry in self._list_records(rtype, name, content)
        ]
    LOGGER.debug("delete_records: %s", record_ids)
    for rid in record_ids:
        self._delete(
            "/v1/domains/{0}/records/{1}".format(self.domain, rid)
        )
        LOGGER.debug("delete_record: %s", rid)
    LOGGER.debug("delete_record: %s", True)
    return True
constant[Delete an existing record.
If the record doesn't exist, does nothing.
]
if <ast.UnaryOp object at 0x7da1b1d5c100> begin[:]
variable[records] assign[=] call[name[self]._list_records, parameter[name[rtype], name[name], name[content]]]
variable[identifiers] assign[=] <ast.ListComp object at 0x7da1b1d5cc10>
call[name[LOGGER].debug, parameter[constant[delete_records: %s], name[identifiers]]]
for taget[name[record_id]] in starred[name[identifiers]] begin[:]
call[name[self]._delete, parameter[call[constant[/v1/domains/{0}/records/{1}].format, parameter[name[self].domain, name[record_id]]]]]
call[name[LOGGER].debug, parameter[constant[delete_record: %s], name[record_id]]]
call[name[LOGGER].debug, parameter[constant[delete_record: %s], constant[True]]]
return[constant[True]] | keyword[def] identifier[_delete_record] ( identifier[self] , identifier[identifier] = keyword[None] , identifier[rtype] = keyword[None] , identifier[name] = keyword[None] , identifier[content] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[identifier] :
identifier[records] = identifier[self] . identifier[_list_records] ( identifier[rtype] , identifier[name] , identifier[content] )
identifier[identifiers] =[ identifier[record] [ literal[string] ] keyword[for] identifier[record] keyword[in] identifier[records] ]
keyword[else] :
identifier[identifiers] =[ identifier[identifier] ]
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[identifiers] )
keyword[for] identifier[record_id] keyword[in] identifier[identifiers] :
identifier[self] . identifier[_delete] (
literal[string] . identifier[format] (
identifier[self] . identifier[domain] , identifier[record_id]
)
)
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[record_id] )
identifier[LOGGER] . identifier[debug] ( literal[string] , keyword[True] )
keyword[return] keyword[True] | def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
"""Delete an existing record.
If the record doesn't exist, does nothing.
"""
if not identifier:
records = self._list_records(rtype, name, content)
identifiers = [record['id'] for record in records] # depends on [control=['if'], data=[]]
else:
identifiers = [identifier]
LOGGER.debug('delete_records: %s', identifiers)
for record_id in identifiers:
self._delete('/v1/domains/{0}/records/{1}'.format(self.domain, record_id))
LOGGER.debug('delete_record: %s', record_id) # depends on [control=['for'], data=['record_id']]
LOGGER.debug('delete_record: %s', True)
return True |
def get_size(vm_):
    '''
    Return the VM's size slug. Used by create_node().

    Raises SaltCloudNotFound when the configured size does not match any
    available size (comparison is case-insensitive on the requested value).
    '''
    requested = six.text_type(config.get_cloud_config_value(
        'size', vm_, __opts__, search_global=False
    ))
    wanted = requested.lower()
    for info in avail_sizes().values():
        slug = info['slug']
        if wanted == slug:
            return slug
    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(requested)
    )
constant[
Return the VM's size. Used by create_node().
]
variable[sizes] assign[=] call[name[avail_sizes], parameter[]]
variable[vm_size] assign[=] call[name[six].text_type, parameter[call[name[config].get_cloud_config_value, parameter[constant[size], name[vm_], name[__opts__]]]]]
for taget[name[size]] in starred[name[sizes]] begin[:]
if compare[call[name[vm_size].lower, parameter[]] equal[==] call[call[name[sizes]][name[size]]][constant[slug]]] begin[:]
return[call[call[name[sizes]][name[size]]][constant[slug]]]
<ast.Raise object at 0x7da1b1f49720> | keyword[def] identifier[get_size] ( identifier[vm_] ):
literal[string]
identifier[sizes] = identifier[avail_sizes] ()
identifier[vm_size] = identifier[six] . identifier[text_type] ( identifier[config] . identifier[get_cloud_config_value] (
literal[string] , identifier[vm_] , identifier[__opts__] , identifier[search_global] = keyword[False]
))
keyword[for] identifier[size] keyword[in] identifier[sizes] :
keyword[if] identifier[vm_size] . identifier[lower] ()== identifier[sizes] [ identifier[size] ][ literal[string] ]:
keyword[return] identifier[sizes] [ identifier[size] ][ literal[string] ]
keyword[raise] identifier[SaltCloudNotFound] (
literal[string] . identifier[format] ( identifier[vm_size] )
) | def get_size(vm_):
"""
Return the VM's size. Used by create_node().
"""
sizes = avail_sizes()
vm_size = six.text_type(config.get_cloud_config_value('size', vm_, __opts__, search_global=False))
for size in sizes:
if vm_size.lower() == sizes[size]['slug']:
return sizes[size]['slug'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['size']]
raise SaltCloudNotFound("The specified size, '{0}', could not be found.".format(vm_size)) |
def _set_mode(self):
    """
    Apply the configured permission bits to ``self.path`` using os.chmod.

    No-op when ``self.mode`` is ``None``.
    :return: None
    """
    if self.mode is None:
        return
    logger.debug("changing permission bits of %s to %s", self.path, oct(self.mode))
    os.chmod(self.path, self.mode)
constant[
set permission bits if needed using python API os.chmod
:return: None
]
if compare[name[self].mode is_not constant[None]] begin[:]
call[name[logger].debug, parameter[constant[changing permission bits of %s to %s], name[self].path, call[name[oct], parameter[name[self].mode]]]]
call[name[os].chmod, parameter[name[self].path, name[self].mode]] | keyword[def] identifier[_set_mode] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[mode] keyword[is] keyword[not] keyword[None] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[path] , identifier[oct] ( identifier[self] . identifier[mode] ))
identifier[os] . identifier[chmod] ( identifier[self] . identifier[path] , identifier[self] . identifier[mode] ) | def _set_mode(self):
"""
set permission bits if needed using python API os.chmod
:return: None
"""
if self.mode is not None:
logger.debug('changing permission bits of %s to %s', self.path, oct(self.mode))
os.chmod(self.path, self.mode) # depends on [control=['if'], data=[]] |
def put(self, local, remote, sudo=False):
    """Copy local file to host via SFTP/SCP
    Copy is done natively using SFTP/SCP version 2 protocol, no scp command
    is used or required.

    Directories are copied recursively via ``put_dir``; anything else goes
    through ``put_single``.
    """
    # Pick the transfer routine based on what the local path is.
    transfer = self.put_dir if os.path.isdir(local) else self.put_single
    transfer(local, remote, sudo=sudo)
self.put_single(local, remote, sudo=sudo) | def function[put, parameter[self, local, remote, sudo]]:
constant[Copy local file to host via SFTP/SCP
Copy is done natively using SFTP/SCP version 2 protocol, no scp command
is used or required.
]
if call[name[os].path.isdir, parameter[name[local]]] begin[:]
call[name[self].put_dir, parameter[name[local], name[remote]]] | keyword[def] identifier[put] ( identifier[self] , identifier[local] , identifier[remote] , identifier[sudo] = keyword[False] ):
literal[string]
keyword[if] ( identifier[os] . identifier[path] . identifier[isdir] ( identifier[local] )):
identifier[self] . identifier[put_dir] ( identifier[local] , identifier[remote] , identifier[sudo] = identifier[sudo] )
keyword[else] :
identifier[self] . identifier[put_single] ( identifier[local] , identifier[remote] , identifier[sudo] = identifier[sudo] ) | def put(self, local, remote, sudo=False):
"""Copy local file to host via SFTP/SCP
Copy is done natively using SFTP/SCP version 2 protocol, no scp command
is used or required.
"""
if os.path.isdir(local):
self.put_dir(local, remote, sudo=sudo) # depends on [control=['if'], data=[]]
else:
self.put_single(local, remote, sudo=sudo) |
def get(self, key, default=miss):
    """Return the value for given key if it exists, else *default*."""
    if key in self._dict:
        # Going through __getitem__ on purpose: it updates the item's
        # bookkeeping (recency) as a side effect of the lookup.
        return self[key]
    return default
constant[Return the value for given key if it exists.]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self]._dict] begin[:]
return[name[default]]
return[call[name[self]][name[key]]] | keyword[def] identifier[get] ( identifier[self] , identifier[key] , identifier[default] = identifier[miss] ):
literal[string]
keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[_dict] :
keyword[return] identifier[default]
keyword[return] identifier[self] [ identifier[key] ] | def get(self, key, default=miss):
"""Return the value for given key if it exists."""
if key not in self._dict:
return default # depends on [control=['if'], data=[]]
# invokes __getitem__, which updates the item
return self[key] |
def refresh(self, *objects, **kwargs):
    '''
    This method is an alternate API for refreshing many entities (possibly
    not tracked by the session). You can call::
        session.refresh(obj)
        session.refresh(obj1, obj2, ...)
        session.refresh([obj1, obj2, ...])
    And all provided entities will be reloaded from Redis.
    To force reloading for modified entities, you can pass ``force=True``.
    '''
    self._init()
    from rom import Model
    force = kwargs.get('force')
    for entity in objects:
        if isinstance(entity, (list, tuple)):
            # Flatten nested collections by recursing with the same flag.
            self.refresh(*entity, force=force)
            continue
        if not isinstance(entity, Model):
            raise ORMError(
                "Cannot refresh an object that is not an instance of a Model (you provided %r)"%(
                    entity,))
        if entity._new:
            # Entities with nothing stored yet cannot be reloaded; simply
            # (re-)track them in the session instead.
            self.add(entity)
        else:
            entity.refresh(force=force)
constant[
This method is an alternate API for refreshing many entities (possibly
not tracked by the session). You can call::
session.refresh(obj)
session.refresh(obj1, obj2, ...)
session.refresh([obj1, obj2, ...])
And all provided entities will be reloaded from Redis.
To force reloading for modified entities, you can pass ``force=True``.
]
call[name[self]._init, parameter[]]
from relative_module[rom] import module[Model]
variable[force] assign[=] call[name[kwargs].get, parameter[constant[force]]]
for taget[name[o]] in starred[name[objects]] begin[:]
if call[name[isinstance], parameter[name[o], tuple[[<ast.Name object at 0x7da1b05c57b0>, <ast.Name object at 0x7da1b05c7ac0>]]]] begin[:]
call[name[self].refresh, parameter[<ast.Starred object at 0x7da1b05c5930>]] | keyword[def] identifier[refresh] ( identifier[self] ,* identifier[objects] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[_init] ()
keyword[from] identifier[rom] keyword[import] identifier[Model]
identifier[force] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[for] identifier[o] keyword[in] identifier[objects] :
keyword[if] identifier[isinstance] ( identifier[o] ,( identifier[list] , identifier[tuple] )):
identifier[self] . identifier[refresh] (* identifier[o] , identifier[force] = identifier[force] )
keyword[elif] identifier[isinstance] ( identifier[o] , identifier[Model] ):
keyword[if] keyword[not] identifier[o] . identifier[_new] :
identifier[o] . identifier[refresh] ( identifier[force] = identifier[force] )
keyword[else] :
identifier[self] . identifier[add] ( identifier[o] )
keyword[else] :
keyword[raise] identifier[ORMError] (
literal[string] %(
identifier[o] ,)) | def refresh(self, *objects, **kwargs):
"""
This method is an alternate API for refreshing many entities (possibly
not tracked by the session). You can call::
session.refresh(obj)
session.refresh(obj1, obj2, ...)
session.refresh([obj1, obj2, ...])
And all provided entities will be reloaded from Redis.
To force reloading for modified entities, you can pass ``force=True``.
"""
self._init()
from rom import Model
force = kwargs.get('force')
for o in objects:
if isinstance(o, (list, tuple)):
self.refresh(*o, force=force) # depends on [control=['if'], data=[]]
elif isinstance(o, Model):
if not o._new:
o.refresh(force=force) # depends on [control=['if'], data=[]]
else:
# all objects are re-added to the session after refresh,
# except for deleted entities...
self.add(o) # depends on [control=['if'], data=[]]
else:
raise ORMError('Cannot refresh an object that is not an instance of a Model (you provided %r)' % (o,)) # depends on [control=['for'], data=['o']] |
def competition_download_cli(self,
                             competition,
                             competition_opt=None,
                             file_name=None,
                             path=None,
                             force=False,
                             quiet=False):
    """ a wrapper to competition_download_files, but first will parse input
    from API client. Additional parameters are listed here, see
    competition_download for remaining.
    Parameters
    =========
    competition: the name of the competition
    competition_opt: an alternative competition option provided by cli
    file_name: the configuration file name
    path: a path to download the file to
    force: force the download if the file already exists (default False)
    quiet: suppress verbose output (default is False)
    """
    resolved = competition or competition_opt
    if resolved is None:
        # Fall back to the competition stored in the user's configuration.
        resolved = self.get_config_value(self.CONFIG_NAME_COMPETITION)
        if resolved is not None and not quiet:
            print('Using competition: ' + resolved)
    if resolved is None:
        raise ValueError('No competition specified')
    if file_name is None:
        # No specific file requested: grab the whole bundle.
        self.competition_download_files(resolved, path, force, quiet)
    else:
        self.competition_download_file(resolved, file_name, path, force,
                                       quiet)
constant[ a wrapper to competition_download_files, but first will parse input
from API client. Additional parameters are listed here, see
competition_download for remaining.
Parameters
=========
competition: the name of the competition
competition_opt: an alternative competition option provided by cli
file_name: the configuration file name
path: a path to download the file to
force: force the download if the file already exists (default False)
quiet: suppress verbose output (default is False)
]
variable[competition] assign[=] <ast.BoolOp object at 0x7da1b21858a0>
if compare[name[competition] is constant[None]] begin[:]
variable[competition] assign[=] call[name[self].get_config_value, parameter[name[self].CONFIG_NAME_COMPETITION]]
if <ast.BoolOp object at 0x7da1b2184250> begin[:]
call[name[print], parameter[binary_operation[constant[Using competition: ] + name[competition]]]]
if compare[name[competition] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b21845e0> | keyword[def] identifier[competition_download_cli] ( identifier[self] ,
identifier[competition] ,
identifier[competition_opt] = keyword[None] ,
identifier[file_name] = keyword[None] ,
identifier[path] = keyword[None] ,
identifier[force] = keyword[False] ,
identifier[quiet] = keyword[False] ):
literal[string]
identifier[competition] = identifier[competition] keyword[or] identifier[competition_opt]
keyword[if] identifier[competition] keyword[is] keyword[None] :
identifier[competition] = identifier[self] . identifier[get_config_value] ( identifier[self] . identifier[CONFIG_NAME_COMPETITION] )
keyword[if] identifier[competition] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[quiet] :
identifier[print] ( literal[string] + identifier[competition] )
keyword[if] identifier[competition] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
keyword[if] identifier[file_name] keyword[is] keyword[None] :
identifier[self] . identifier[competition_download_files] ( identifier[competition] , identifier[path] , identifier[force] ,
identifier[quiet] )
keyword[else] :
identifier[self] . identifier[competition_download_file] ( identifier[competition] , identifier[file_name] , identifier[path] ,
identifier[force] , identifier[quiet] ) | def competition_download_cli(self, competition, competition_opt=None, file_name=None, path=None, force=False, quiet=False):
""" a wrapper to competition_download_files, but first will parse input
from API client. Additional parameters are listed here, see
competition_download for remaining.
Parameters
=========
competition: the name of the competition
competition_opt: an alternative competition option provided by cli
file_name: the configuration file name
path: a path to download the file to
force: force the download if the file already exists (default False)
quiet: suppress verbose output (default is False)
"""
competition = competition or competition_opt
if competition is None:
competition = self.get_config_value(self.CONFIG_NAME_COMPETITION)
if competition is not None and (not quiet):
print('Using competition: ' + competition) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['competition']]
if competition is None:
raise ValueError('No competition specified') # depends on [control=['if'], data=[]]
elif file_name is None:
self.competition_download_files(competition, path, force, quiet) # depends on [control=['if'], data=[]]
else:
self.competition_download_file(competition, file_name, path, force, quiet) |
def add_markdown_cell(self, content, tags=None):
    """
    Append a markdown cell with content 'content' to this Notebook object.
    ----------
    Parameters
    ----------
    content : str
        Text/HTML code/... to include in the markdown cell (triple quote for multiline text).
    tags : list
        A list of tags to include in the markdown cell metadata.
    """
    metadata = {"metadata": {"tags": tags}}
    cell = nb.v4.new_markdown_cell(content, **metadata)
    self.notebook["cells"].append(cell)
{"tags": tags}})) | def function[add_markdown_cell, parameter[self, content, tags]]:
constant[
Class method responsible for adding a markdown cell with content 'content' to the
Notebook object.
----------
Parameters
----------
content : str
Text/HTML code/... to include in the markdown cell (triple quote for multiline text).
tags : list
A list of tags to include in the markdown cell metadata.
]
call[call[name[self].notebook][constant[cells]].append, parameter[call[name[nb].v4.new_markdown_cell, parameter[name[content]]]]] | keyword[def] identifier[add_markdown_cell] ( identifier[self] , identifier[content] , identifier[tags] = keyword[None] ):
literal[string]
identifier[self] . identifier[notebook] [ literal[string] ]. identifier[append] ( identifier[nb] . identifier[v4] . identifier[new_markdown_cell] ( identifier[content] ,**{ literal[string] :
{ literal[string] : identifier[tags] }})) | def add_markdown_cell(self, content, tags=None):
"""
Class method responsible for adding a markdown cell with content 'content' to the
Notebook object.
----------
Parameters
----------
content : str
Text/HTML code/... to include in the markdown cell (triple quote for multiline text).
tags : list
A list of tags to include in the markdown cell metadata.
"""
self.notebook['cells'].append(nb.v4.new_markdown_cell(content, **{'metadata': {'tags': tags}})) |
def index(self):
    '''
    Render the extension's index page.
    '''
    context = {
        'userinfo': self.userinfo,
        'cfg': CMS_CFG,
        'kwd': {},
    }
    self.render('ext_excel/index.html', **context)
kwd={}, ) | def function[index, parameter[self]]:
constant[
Index funtion.
]
call[name[self].render, parameter[constant[ext_excel/index.html]]] | keyword[def] identifier[index] ( identifier[self] ):
literal[string]
identifier[self] . identifier[render] ( literal[string] ,
identifier[userinfo] = identifier[self] . identifier[userinfo] ,
identifier[cfg] = identifier[CMS_CFG] ,
identifier[kwd] ={},) | def index(self):
"""
Index funtion.
"""
self.render('ext_excel/index.html', userinfo=self.userinfo, cfg=CMS_CFG, kwd={}) |
def from_dict(cls, d):
    """
    Restores an object state from a dictionary, used in de-JSONification.
    :param d: the object dictionary
    :type d: dict
    :return: the object
    :rtype: object
    """
    result = super(ActorHandler, cls).from_dict(d)
    for entry in d.get("actors", []):
        # The "type" key may be stored as a unicode or a plain string.
        typestr = entry[u"type"] if u"type" in entry else entry["type"]
        handler = classes.get_dict_handler(typestr)
        result.actors.append(handler(entry))
    return result
constant[
Restores an object state from a dictionary, used in de-JSONification.
:param d: the object dictionary
:type d: dict
:return: the object
:rtype: object
]
variable[result] assign[=] call[call[name[super], parameter[name[ActorHandler], name[cls]]].from_dict, parameter[name[d]]]
if compare[constant[actors] in name[d]] begin[:]
variable[l] assign[=] call[name[d]][constant[actors]]
for taget[name[e]] in starred[name[l]] begin[:]
if compare[constant[type] in name[e]] begin[:]
variable[typestr] assign[=] call[name[e]][constant[type]]
call[name[result].actors.append, parameter[call[call[name[classes].get_dict_handler, parameter[name[typestr]]], parameter[name[e]]]]]
return[name[result]] | keyword[def] identifier[from_dict] ( identifier[cls] , identifier[d] ):
literal[string]
identifier[result] = identifier[super] ( identifier[ActorHandler] , identifier[cls] ). identifier[from_dict] ( identifier[d] )
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[l] = identifier[d] [ literal[string] ]
keyword[for] identifier[e] keyword[in] identifier[l] :
keyword[if] literal[string] keyword[in] identifier[e] :
identifier[typestr] = identifier[e] [ literal[string] ]
keyword[else] :
identifier[typestr] = identifier[e] [ literal[string] ]
identifier[result] . identifier[actors] . identifier[append] ( identifier[classes] . identifier[get_dict_handler] ( identifier[typestr] )( identifier[e] ))
keyword[return] identifier[result] | def from_dict(cls, d):
"""
Restores an object state from a dictionary, used in de-JSONification.
:param d: the object dictionary
:type d: dict
:return: the object
:rtype: object
"""
result = super(ActorHandler, cls).from_dict(d)
if 'actors' in d:
l = d['actors']
for e in l:
if u'type' in e:
typestr = e[u'type'] # depends on [control=['if'], data=['e']]
else:
typestr = e['type']
result.actors.append(classes.get_dict_handler(typestr)(e)) # depends on [control=['for'], data=['e']] # depends on [control=['if'], data=['d']]
return result |
def run(self):
    """Build modules, packages, and copy data files to build directory"""
    if not (self.py_modules or self.packages):
        # Nothing to build at all.
        return
    if self.py_modules:
        self.build_modules()
    if self.packages:
        self.build_packages()
        self.build_package_data()
    # Run the 2to3 conversion passes over updated sources and doctests.
    for file_list, doctests_only in (
            (self.__updated_files, False),
            (self.__updated_files, True),
            (self.__doctests_2to3, True)):
        self.run_2to3(file_list, doctests_only)
    # Only compile actual .py files, using our base class' idea of what our
    # output files are.
    self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
constant[Build modules, packages, and copy data files to build directory]
if <ast.BoolOp object at 0x7da1b1bc2bf0> begin[:]
return[None]
if name[self].py_modules begin[:]
call[name[self].build_modules, parameter[]]
if name[self].packages begin[:]
call[name[self].build_packages, parameter[]]
call[name[self].build_package_data, parameter[]]
call[name[self].run_2to3, parameter[name[self].__updated_files, constant[False]]]
call[name[self].run_2to3, parameter[name[self].__updated_files, constant[True]]]
call[name[self].run_2to3, parameter[name[self].__doctests_2to3, constant[True]]]
call[name[self].byte_compile, parameter[call[name[orig].build_py.get_outputs, parameter[name[self]]]]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[py_modules] keyword[and] keyword[not] identifier[self] . identifier[packages] :
keyword[return]
keyword[if] identifier[self] . identifier[py_modules] :
identifier[self] . identifier[build_modules] ()
keyword[if] identifier[self] . identifier[packages] :
identifier[self] . identifier[build_packages] ()
identifier[self] . identifier[build_package_data] ()
identifier[self] . identifier[run_2to3] ( identifier[self] . identifier[__updated_files] , keyword[False] )
identifier[self] . identifier[run_2to3] ( identifier[self] . identifier[__updated_files] , keyword[True] )
identifier[self] . identifier[run_2to3] ( identifier[self] . identifier[__doctests_2to3] , keyword[True] )
identifier[self] . identifier[byte_compile] ( identifier[orig] . identifier[build_py] . identifier[get_outputs] ( identifier[self] , identifier[include_bytecode] = literal[int] )) | def run(self):
"""Build modules, packages, and copy data files to build directory"""
if not self.py_modules and (not self.packages):
return # depends on [control=['if'], data=[]]
if self.py_modules:
self.build_modules() # depends on [control=['if'], data=[]]
if self.packages:
self.build_packages()
self.build_package_data() # depends on [control=['if'], data=[]]
self.run_2to3(self.__updated_files, False)
self.run_2to3(self.__updated_files, True)
self.run_2to3(self.__doctests_2to3, True)
# Only compile actual .py files, using our base class' idea of what our
# output files are.
self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0)) |
def verify_id_n_version(id, version):
    """Given an ``id`` and ``version``, verify the identified content exists.

    :raises NotFound: when no matching content is found, or when the
        verification query flags the id/version pair as invalid.
    :return: ``True`` when the content exists.
    """
    stmt = _get_sql('verify-id-and-version.sql')
    args = dict(id=id, version=version)
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute(stmt, args)
            try:
                valid = cursor.fetchone()[0]
            except TypeError:
                # fetchone() returned None: the query matched no row.
                raise NotFound(join_ident_hash(id, version))
            if not valid:
                # Bug fix: ``valid`` was previously fetched but ignored, so a
                # row carrying a falsy flag still reported success.
                raise NotFound(join_ident_hash(id, version))
    return True
constant[Given an ``id`` and ``version``, verify the identified content exists.
]
variable[stmt] assign[=] call[name[_get_sql], parameter[constant[verify-id-and-version.sql]]]
variable[args] assign[=] call[name[dict], parameter[]]
with call[name[db_connect], parameter[]] begin[:]
with call[name[db_conn].cursor, parameter[]] begin[:]
call[name[cursor].execute, parameter[name[stmt], name[args]]]
<ast.Try object at 0x7da1b198feb0>
return[constant[True]] | keyword[def] identifier[verify_id_n_version] ( identifier[id] , identifier[version] ):
literal[string]
identifier[stmt] = identifier[_get_sql] ( literal[string] )
identifier[args] = identifier[dict] ( identifier[id] = identifier[id] , identifier[version] = identifier[version] )
keyword[with] identifier[db_connect] () keyword[as] identifier[db_conn] :
keyword[with] identifier[db_conn] . identifier[cursor] () keyword[as] identifier[cursor] :
identifier[cursor] . identifier[execute] ( identifier[stmt] , identifier[args] )
keyword[try] :
identifier[valid] = identifier[cursor] . identifier[fetchone] ()[ literal[int] ]
keyword[except] identifier[TypeError] :
keyword[raise] identifier[NotFound] ( identifier[join_ident_hash] ( identifier[id] , identifier[version] ))
keyword[return] keyword[True] | def verify_id_n_version(id, version):
"""Given an ``id`` and ``version``, verify the identified content exists.
"""
stmt = _get_sql('verify-id-and-version.sql')
args = dict(id=id, version=version)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
try:
valid = cursor.fetchone()[0] # depends on [control=['try'], data=[]]
except TypeError:
raise NotFound(join_ident_hash(id, version)) # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['cursor']] # depends on [control=['with'], data=['db_conn']]
return True |
async def upload_file(
        self, file, *, part_size_kb=None, file_name=None, use_cache=None,
        progress_callback=None):
    """
    Uploads the specified file and returns a handle (an instance of
    :tl:`InputFile` or :tl:`InputFileBig`, as required) which can be
    later used before it expires (they are usable during less than a day).
    Uploading a file will simply return a "handle" to the file stored
    remotely in the Telegram servers, which can be later used on. This
    will **not** upload the file to your own chat or any chat at all.
    Args:
        file (`str` | `bytes` | `file`):
            The path of the file, byte array, or stream that will be sent.
            Note that if a byte array or a stream is given, a filename
            or its type won't be inferred, and it will be sent as an
            "unnamed application/octet-stream".
        part_size_kb (`int`, optional):
            Chunk size when uploading files. The larger, the less
            requests will be made (up to 512KB maximum).
        file_name (`str`, optional):
            The file name which will be used on the resulting InputFile.
            If not specified, the name will be taken from the ``file``
            and if this is not a ``str``, it will be ``"unnamed"``.
        use_cache (`type`, optional):
            The type of cache to use (currently either :tl:`InputDocument`
            or :tl:`InputPhoto`). If present and the file is small enough
            to need the MD5, it will be checked against the database,
            and if a match is found, the upload won't be made. Instead,
            an instance of type ``use_cache`` will be returned.
        progress_callback (`callable`, optional):
            A callback function accepting two parameters:
            ``(sent bytes, total)``.
    Returns:
        :tl:`InputFileBig` if the file size is larger than 10MB,
        `telethon.tl.custom.inputsizedfile.InputSizedFile`
        (subclass of :tl:`InputFile`) otherwise.
    """
    if isinstance(file, (types.InputFile, types.InputFileBig)):
        return file  # Already uploaded
    # Prefer the stream's own name when the caller didn't supply one.
    if not file_name and getattr(file, 'name', None):
        file_name = file.name
    if isinstance(file, str):
        file_size = os.path.getsize(file)
    elif isinstance(file, bytes):
        file_size = len(file)
    else:
        # Arbitrary stream: slurp its contents, restoring the position
        # afterwards when the stream supports seeking.
        if isinstance(file, io.IOBase) and file.seekable():
            pos = file.tell()
        else:
            pos = None
        # TODO Don't load the entire file in memory always
        data = file.read()
        if pos is not None:
            file.seek(pos)
        file = data
        file_size = len(file)
    # File will now either be a string or bytes
    if not part_size_kb:
        part_size_kb = utils.get_appropriated_part_size(file_size)
    if part_size_kb > 512:
        raise ValueError('The part size must be less or equal to 512KB')
    part_size = int(part_size_kb * 1024)
    if part_size % 1024 != 0:
        raise ValueError(
            'The part size must be evenly divisible by 1024')
    # Set a default file name if None was specified
    file_id = helpers.generate_random_long()
    if not file_name:
        if isinstance(file, str):
            file_name = os.path.basename(file)
        else:
            file_name = str(file_id)
    # If the file name lacks extension, add it if possible.
    # Else Telegram complains with `PHOTO_EXT_INVALID_ERROR`
    # even if the uploaded image is indeed a photo.
    if not os.path.splitext(file_name)[-1]:
        file_name += utils._get_extension(file)
    # Determine whether the file is too big (over 10MB) or not
    # Telegram does make a distinction between smaller or larger files
    is_large = file_size > 10 * 1024 * 1024
    # NOTE: the hash object is created unconditionally but only updated for
    # small files below; large uploads never use it.
    hash_md5 = hashlib.md5()
    if not is_large:
        # Calculate the MD5 hash before anything else.
        # As this needs to be done always for small files,
        # might as well do it before anything else and
        # check the cache.
        if isinstance(file, str):
            with open(file, 'rb') as stream:
                file = stream.read()
        hash_md5.update(file)
        if use_cache:
            cached = self.session.get_file(
                hash_md5.digest(), file_size, cls=_CacheType(use_cache)
            )
            if cached:
                # Cache hit: skip the upload entirely.
                return cached
    # Ceiling division: the last part may be smaller than part_size.
    part_count = (file_size + part_size - 1) // part_size
    self._log[__name__].info('Uploading file of %d bytes in %d chunks of %d',
                             file_size, part_count, part_size)
    with open(file, 'rb') if isinstance(file, str) else BytesIO(file)\
            as stream:
        for part_index in range(part_count):
            # Read the file in chunks of part_size bytes
            part = stream.read(part_size)
            # The SavePartRequest is different depending on whether
            # the file is too large or not (over or less than 10MB)
            if is_large:
                request = functions.upload.SaveBigFilePartRequest(
                    file_id, part_index, part_count, part)
            else:
                request = functions.upload.SaveFilePartRequest(
                    file_id, part_index, part)
            result = await self(request)
            if result:
                self._log[__name__].debug('Uploaded %d/%d',
                                          part_index + 1, part_count)
                if progress_callback:
                    progress_callback(stream.tell(), file_size)
            else:
                raise RuntimeError(
                    'Failed to upload file part {}.'.format(part_index))
    if is_large:
        return types.InputFileBig(file_id, part_count, file_name)
    else:
        return custom.InputSizedFile(
            file_id, part_count, file_name, md5=hash_md5, size=file_size
        )
) | <ast.AsyncFunctionDef object at 0x7da1b21dbfa0> | keyword[async] keyword[def] identifier[upload_file] (
identifier[self] , identifier[file] ,*, identifier[part_size_kb] = keyword[None] , identifier[file_name] = keyword[None] , identifier[use_cache] = keyword[None] ,
identifier[progress_callback] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[file] ,( identifier[types] . identifier[InputFile] , identifier[types] . identifier[InputFileBig] )):
keyword[return] identifier[file]
keyword[if] keyword[not] identifier[file_name] keyword[and] identifier[getattr] ( identifier[file] , literal[string] , keyword[None] ):
identifier[file_name] = identifier[file] . identifier[name]
keyword[if] identifier[isinstance] ( identifier[file] , identifier[str] ):
identifier[file_size] = identifier[os] . identifier[path] . identifier[getsize] ( identifier[file] )
keyword[elif] identifier[isinstance] ( identifier[file] , identifier[bytes] ):
identifier[file_size] = identifier[len] ( identifier[file] )
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[file] , identifier[io] . identifier[IOBase] ) keyword[and] identifier[file] . identifier[seekable] ():
identifier[pos] = identifier[file] . identifier[tell] ()
keyword[else] :
identifier[pos] = keyword[None]
identifier[data] = identifier[file] . identifier[read] ()
keyword[if] identifier[pos] keyword[is] keyword[not] keyword[None] :
identifier[file] . identifier[seek] ( identifier[pos] )
identifier[file] = identifier[data]
identifier[file_size] = identifier[len] ( identifier[file] )
keyword[if] keyword[not] identifier[part_size_kb] :
identifier[part_size_kb] = identifier[utils] . identifier[get_appropriated_part_size] ( identifier[file_size] )
keyword[if] identifier[part_size_kb] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[part_size] = identifier[int] ( identifier[part_size_kb] * literal[int] )
keyword[if] identifier[part_size] % literal[int] != literal[int] :
keyword[raise] identifier[ValueError] (
literal[string] )
identifier[file_id] = identifier[helpers] . identifier[generate_random_long] ()
keyword[if] keyword[not] identifier[file_name] :
keyword[if] identifier[isinstance] ( identifier[file] , identifier[str] ):
identifier[file_name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[file] )
keyword[else] :
identifier[file_name] = identifier[str] ( identifier[file_id] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[splitext] ( identifier[file_name] )[- literal[int] ]:
identifier[file_name] += identifier[utils] . identifier[_get_extension] ( identifier[file] )
identifier[is_large] = identifier[file_size] > literal[int] * literal[int] * literal[int]
identifier[hash_md5] = identifier[hashlib] . identifier[md5] ()
keyword[if] keyword[not] identifier[is_large] :
keyword[if] identifier[isinstance] ( identifier[file] , identifier[str] ):
keyword[with] identifier[open] ( identifier[file] , literal[string] ) keyword[as] identifier[stream] :
identifier[file] = identifier[stream] . identifier[read] ()
identifier[hash_md5] . identifier[update] ( identifier[file] )
keyword[if] identifier[use_cache] :
identifier[cached] = identifier[self] . identifier[session] . identifier[get_file] (
identifier[hash_md5] . identifier[digest] (), identifier[file_size] , identifier[cls] = identifier[_CacheType] ( identifier[use_cache] )
)
keyword[if] identifier[cached] :
keyword[return] identifier[cached]
identifier[part_count] =( identifier[file_size] + identifier[part_size] - literal[int] )// identifier[part_size]
identifier[self] . identifier[_log] [ identifier[__name__] ]. identifier[info] ( literal[string] ,
identifier[file_size] , identifier[part_count] , identifier[part_size] )
keyword[with] identifier[open] ( identifier[file] , literal[string] ) keyword[if] identifier[isinstance] ( identifier[file] , identifier[str] ) keyword[else] identifier[BytesIO] ( identifier[file] ) keyword[as] identifier[stream] :
keyword[for] identifier[part_index] keyword[in] identifier[range] ( identifier[part_count] ):
identifier[part] = identifier[stream] . identifier[read] ( identifier[part_size] )
keyword[if] identifier[is_large] :
identifier[request] = identifier[functions] . identifier[upload] . identifier[SaveBigFilePartRequest] (
identifier[file_id] , identifier[part_index] , identifier[part_count] , identifier[part] )
keyword[else] :
identifier[request] = identifier[functions] . identifier[upload] . identifier[SaveFilePartRequest] (
identifier[file_id] , identifier[part_index] , identifier[part] )
identifier[result] = keyword[await] identifier[self] ( identifier[request] )
keyword[if] identifier[result] :
identifier[self] . identifier[_log] [ identifier[__name__] ]. identifier[debug] ( literal[string] ,
identifier[part_index] + literal[int] , identifier[part_count] )
keyword[if] identifier[progress_callback] :
identifier[progress_callback] ( identifier[stream] . identifier[tell] (), identifier[file_size] )
keyword[else] :
keyword[raise] identifier[RuntimeError] (
literal[string] . identifier[format] ( identifier[part_index] ))
keyword[if] identifier[is_large] :
keyword[return] identifier[types] . identifier[InputFileBig] ( identifier[file_id] , identifier[part_count] , identifier[file_name] )
keyword[else] :
keyword[return] identifier[custom] . identifier[InputSizedFile] (
identifier[file_id] , identifier[part_count] , identifier[file_name] , identifier[md5] = identifier[hash_md5] , identifier[size] = identifier[file_size]
) | async def upload_file(self, file, *, part_size_kb=None, file_name=None, use_cache=None, progress_callback=None):
"""
Uploads the specified file and returns a handle (an instance of
:tl:`InputFile` or :tl:`InputFileBig`, as required) which can be
later used before it expires (they are usable during less than a day).
Uploading a file will simply return a "handle" to the file stored
remotely in the Telegram servers, which can be later used on. This
will **not** upload the file to your own chat or any chat at all.
Args:
file (`str` | `bytes` | `file`):
The path of the file, byte array, or stream that will be sent.
Note that if a byte array or a stream is given, a filename
or its type won't be inferred, and it will be sent as an
"unnamed application/octet-stream".
part_size_kb (`int`, optional):
Chunk size when uploading files. The larger, the less
requests will be made (up to 512KB maximum).
file_name (`str`, optional):
The file name which will be used on the resulting InputFile.
If not specified, the name will be taken from the ``file``
and if this is not a ``str``, it will be ``"unnamed"``.
use_cache (`type`, optional):
The type of cache to use (currently either :tl:`InputDocument`
or :tl:`InputPhoto`). If present and the file is small enough
to need the MD5, it will be checked against the database,
and if a match is found, the upload won't be made. Instead,
an instance of type ``use_cache`` will be returned.
progress_callback (`callable`, optional):
A callback function accepting two parameters:
``(sent bytes, total)``.
Returns:
:tl:`InputFileBig` if the file size is larger than 10MB,
`telethon.tl.custom.inputsizedfile.InputSizedFile`
(subclass of :tl:`InputFile`) otherwise.
"""
if isinstance(file, (types.InputFile, types.InputFileBig)):
return file # Already uploaded # depends on [control=['if'], data=[]]
if not file_name and getattr(file, 'name', None):
file_name = file.name # depends on [control=['if'], data=[]]
if isinstance(file, str):
file_size = os.path.getsize(file) # depends on [control=['if'], data=[]]
elif isinstance(file, bytes):
file_size = len(file) # depends on [control=['if'], data=[]]
else:
if isinstance(file, io.IOBase) and file.seekable():
pos = file.tell() # depends on [control=['if'], data=[]]
else:
pos = None
# TODO Don't load the entire file in memory always
data = file.read()
if pos is not None:
file.seek(pos) # depends on [control=['if'], data=['pos']]
file = data
file_size = len(file)
# File will now either be a string or bytes
if not part_size_kb:
part_size_kb = utils.get_appropriated_part_size(file_size) # depends on [control=['if'], data=[]]
if part_size_kb > 512:
raise ValueError('The part size must be less or equal to 512KB') # depends on [control=['if'], data=[]]
part_size = int(part_size_kb * 1024)
if part_size % 1024 != 0:
raise ValueError('The part size must be evenly divisible by 1024') # depends on [control=['if'], data=[]]
# Set a default file name if None was specified
file_id = helpers.generate_random_long()
if not file_name:
if isinstance(file, str):
file_name = os.path.basename(file) # depends on [control=['if'], data=[]]
else:
file_name = str(file_id) # depends on [control=['if'], data=[]]
# If the file name lacks extension, add it if possible.
# Else Telegram complains with `PHOTO_EXT_INVALID_ERROR`
# even if the uploaded image is indeed a photo.
if not os.path.splitext(file_name)[-1]:
file_name += utils._get_extension(file) # depends on [control=['if'], data=[]]
# Determine whether the file is too big (over 10MB) or not
# Telegram does make a distinction between smaller or larger files
is_large = file_size > 10 * 1024 * 1024
hash_md5 = hashlib.md5()
if not is_large:
# Calculate the MD5 hash before anything else.
# As this needs to be done always for small files,
# might as well do it before anything else and
# check the cache.
if isinstance(file, str):
with open(file, 'rb') as stream:
file = stream.read() # depends on [control=['with'], data=['stream']] # depends on [control=['if'], data=[]]
hash_md5.update(file)
if use_cache:
cached = self.session.get_file(hash_md5.digest(), file_size, cls=_CacheType(use_cache))
if cached:
return cached # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
part_count = (file_size + part_size - 1) // part_size
self._log[__name__].info('Uploading file of %d bytes in %d chunks of %d', file_size, part_count, part_size)
with open(file, 'rb') if isinstance(file, str) else BytesIO(file) as stream:
for part_index in range(part_count):
# Read the file by in chunks of size part_size
part = stream.read(part_size)
# The SavePartRequest is different depending on whether
# the file is too large or not (over or less than 10MB)
if is_large:
request = functions.upload.SaveBigFilePartRequest(file_id, part_index, part_count, part) # depends on [control=['if'], data=[]]
else:
request = functions.upload.SaveFilePartRequest(file_id, part_index, part)
result = await self(request)
if result:
self._log[__name__].debug('Uploaded %d/%d', part_index + 1, part_count)
if progress_callback:
progress_callback(stream.tell(), file_size) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise RuntimeError('Failed to upload file part {}.'.format(part_index)) # depends on [control=['for'], data=['part_index']] # depends on [control=['with'], data=['stream']]
if is_large:
return types.InputFileBig(file_id, part_count, file_name) # depends on [control=['if'], data=[]]
else:
return custom.InputSizedFile(file_id, part_count, file_name, md5=hash_md5, size=file_size) |
def ranges_intersect(rset):
    """
    Fold range_intersect() pairwise over every range in *rset*.

    Returns the common overlap of all ranges, or None when *rset* is
    empty or the running intersection becomes empty along the way.

    >>> ranges_intersect([(48, 65), (45, 55), (50, 56)])
    [50, 55]
    """
    if not rset:
        return None
    ranges = iter(rset)
    acc = next(ranges)
    for rng in ranges:
        if not acc:
            # Intersection already empty; no overlap is possible.
            return None
        acc = range_intersect(acc, rng)
    return acc
constant[
Recursively calls the range_intersect() - pairwise version.
>>> ranges_intersect([(48, 65), (45, 55), (50, 56)])
[50, 55]
]
if <ast.UnaryOp object at 0x7da1b08ab910> begin[:]
return[constant[None]]
variable[a] assign[=] call[name[rset]][constant[0]]
for taget[name[b]] in starred[call[name[rset]][<ast.Slice object at 0x7da1b0900dc0>]] begin[:]
if <ast.UnaryOp object at 0x7da1b0902020> begin[:]
return[constant[None]]
variable[a] assign[=] call[name[range_intersect], parameter[name[a], name[b]]]
return[name[a]] | keyword[def] identifier[ranges_intersect] ( identifier[rset] ):
literal[string]
keyword[if] keyword[not] identifier[rset] :
keyword[return] keyword[None]
identifier[a] = identifier[rset] [ literal[int] ]
keyword[for] identifier[b] keyword[in] identifier[rset] [ literal[int] :]:
keyword[if] keyword[not] identifier[a] :
keyword[return] keyword[None]
identifier[a] = identifier[range_intersect] ( identifier[a] , identifier[b] )
keyword[return] identifier[a] | def ranges_intersect(rset):
"""
Recursively calls the range_intersect() - pairwise version.
>>> ranges_intersect([(48, 65), (45, 55), (50, 56)])
[50, 55]
"""
if not rset:
return None # depends on [control=['if'], data=[]]
a = rset[0]
for b in rset[1:]:
if not a:
return None # depends on [control=['if'], data=[]]
a = range_intersect(a, b) # depends on [control=['for'], data=['b']]
return a |
def _custom_icon(self, name, **kwargs):
    """Return the icon registered under *name*, or an empty QIcon.

    Keyword arguments override the module-level default options before
    they are handed to the painter.
    """
    options = dict(_default_options, **kwargs)
    if name not in self.painters:
        # No painter registered for this name: hand back a null icon.
        return QIcon()
    return self._icon_by_painter(self.painters[name], options)
return QIcon() | def function[_custom_icon, parameter[self, name]]:
constant[Return the custom icon corresponding to the given name.]
variable[options] assign[=] call[name[dict], parameter[name[_default_options]]]
if compare[name[name] in name[self].painters] begin[:]
variable[painter] assign[=] call[name[self].painters][name[name]]
return[call[name[self]._icon_by_painter, parameter[name[painter], name[options]]]] | keyword[def] identifier[_custom_icon] ( identifier[self] , identifier[name] ,** identifier[kwargs] ):
literal[string]
identifier[options] = identifier[dict] ( identifier[_default_options] ,** identifier[kwargs] )
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[painters] :
identifier[painter] = identifier[self] . identifier[painters] [ identifier[name] ]
keyword[return] identifier[self] . identifier[_icon_by_painter] ( identifier[painter] , identifier[options] )
keyword[else] :
keyword[return] identifier[QIcon] () | def _custom_icon(self, name, **kwargs):
"""Return the custom icon corresponding to the given name."""
options = dict(_default_options, **kwargs)
if name in self.painters:
painter = self.painters[name]
return self._icon_by_painter(painter, options) # depends on [control=['if'], data=['name']]
else:
return QIcon() |
def seek(self, offset, whence=SEEK_SET):
    """
    Change the stream position to the given byte offset.

    Args:
        offset (int): Offset is interpreted relative to the position
            indicated by whence.
        whence (int): The default value for whence is SEEK_SET.
            Values for whence are:

            SEEK_SET or 0 – start of the stream (the default);
            offset should be zero or positive

            SEEK_CUR or 1 – current stream position;
            offset may be negative

            SEEK_END or 2 – end of the stream;
            offset is usually negative

    Returns:
        int: The new absolute position.

    Raises:
        UnsupportedOperation: If the stream is not seekable.
    """
    if not self._seekable:
        raise UnsupportedOperation('seek')

    seek = self._update_seek(offset, whence)

    # If the new position lies past the current end of a writable
    # stream, zero-pad the write buffer up to it (sparse-file style).
    if self._writable:
        size = len(self._write_buffer)
        if seek > size:
            # Explicit in-place append. The previous form,
            # ``buf[seek:size] = ...`` with seek > size, only worked
            # because slice bounds are clamped to len(buf).
            self._write_buffer += b'\0' * (seek - size)
    return seek
constant[
Change the stream position to the given byte offset.
Args:
offset (int): Offset is interpreted relative to the position
indicated by whence.
whence (int): The default value for whence is SEEK_SET.
Values for whence are:
SEEK_SET or 0 – start of the stream (the default);
offset should be zero or positive
SEEK_CUR or 1 – current stream position;
offset may be negative
SEEK_END or 2 – end of the stream;
offset is usually negative
Returns:
int: The new absolute position.
]
if <ast.UnaryOp object at 0x7da1b1b0ed40> begin[:]
<ast.Raise object at 0x7da1b1b0c040>
variable[seek] assign[=] call[name[self]._update_seek, parameter[name[offset], name[whence]]]
if name[self]._writable begin[:]
variable[size] assign[=] call[name[len], parameter[name[self]._write_buffer]]
if compare[name[seek] greater[>] name[size]] begin[:]
call[name[self]._write_buffer][<ast.Slice object at 0x7da1b1a1c0a0>] assign[=] binary_operation[constant[b'\x00'] * binary_operation[name[seek] - name[size]]]
return[name[seek]] | keyword[def] identifier[seek] ( identifier[self] , identifier[offset] , identifier[whence] = identifier[SEEK_SET] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_seekable] :
keyword[raise] identifier[UnsupportedOperation] ( literal[string] )
identifier[seek] = identifier[self] . identifier[_update_seek] ( identifier[offset] , identifier[whence] )
keyword[if] identifier[self] . identifier[_writable] :
identifier[size] = identifier[len] ( identifier[self] . identifier[_write_buffer] )
keyword[if] identifier[seek] > identifier[size] :
identifier[self] . identifier[_write_buffer] [ identifier[seek] : identifier[size] ]= literal[string] *( identifier[seek] - identifier[size] )
keyword[return] identifier[seek] | def seek(self, offset, whence=SEEK_SET):
"""
Change the stream position to the given byte offset.
Args:
offset (int): Offset is interpreted relative to the position
indicated by whence.
whence (int): The default value for whence is SEEK_SET.
Values for whence are:
SEEK_SET or 0 – start of the stream (the default);
offset should be zero or positive
SEEK_CUR or 1 – current stream position;
offset may be negative
SEEK_END or 2 – end of the stream;
offset is usually negative
Returns:
int: The new absolute position.
"""
if not self._seekable:
raise UnsupportedOperation('seek') # depends on [control=['if'], data=[]]
seek = self._update_seek(offset, whence)
# If seek move out of file, add padding until new seek position.
if self._writable:
size = len(self._write_buffer)
if seek > size:
self._write_buffer[seek:size] = b'\x00' * (seek - size) # depends on [control=['if'], data=['seek', 'size']] # depends on [control=['if'], data=[]]
return seek |
def get_host_filters(self, expr):
    """Build the host filter list described by the expression ::

    * '*' => any
    * 'g' => group filter
    * 'r' => regex name filter
    * 'l' => bp rule label filter
    * 't' => tag filter
    * '' => none filter
    * No flag match => host name filter

    :param expr: expression to parse
    :type expr: str
    :return: filter list
    :rtype: list
    """
    if expr == "*":
        return [filter_any]
    match = re.search(r"^([%s]+):(.*)" % self.host_flags, expr)
    if match is None:
        # No flag prefix: treat the whole expression as a host name.
        return [filter_host_by_name(expr)]
    flags, expr = match.groups()
    # Ordered dispatch preserves the original precedence: g, r, l, t.
    for flag, factory in (("g", filter_host_by_group),
                          ("r", filter_host_by_regex),
                          ("l", filter_host_by_bp_rule_label),
                          ("t", filter_host_by_tag)):
        if flag in flags:
            return [factory(expr)]
    return [filter_none]
constant[Generates host filter list corresponding to the expression ::
* '*' => any
* 'g' => group filter
* 'r' => regex name filter
* 'l' => bp rule label filter
* 't' => tag filter
* '' => none filter
* No flag match => host name filter
:param expr: expression to parse
:type expr: str
:return: filter list
:rtype: list
]
if compare[name[expr] equal[==] constant[*]] begin[:]
return[list[[<ast.Name object at 0x7da18dc07970>]]]
variable[match] assign[=] call[name[re].search, parameter[binary_operation[constant[^([%s]+):(.*)] <ast.Mod object at 0x7da2590d6920> name[self].host_flags], name[expr]]]
if compare[name[match] is constant[None]] begin[:]
return[list[[<ast.Call object at 0x7da18dc07fa0>]]]
<ast.Tuple object at 0x7da18dc06b60> assign[=] call[name[match].groups, parameter[]]
if compare[constant[g] in name[flags]] begin[:]
return[list[[<ast.Call object at 0x7da18dc06140>]]]
if compare[constant[r] in name[flags]] begin[:]
return[list[[<ast.Call object at 0x7da18dc05d50>]]]
if compare[constant[l] in name[flags]] begin[:]
return[list[[<ast.Call object at 0x7da18dc04ca0>]]]
if compare[constant[t] in name[flags]] begin[:]
return[list[[<ast.Call object at 0x7da2044c2140>]]]
return[list[[<ast.Name object at 0x7da2044c0970>]]] | keyword[def] identifier[get_host_filters] ( identifier[self] , identifier[expr] ):
literal[string]
keyword[if] identifier[expr] == literal[string] :
keyword[return] [ identifier[filter_any] ]
identifier[match] = identifier[re] . identifier[search] ( literal[string] % identifier[self] . identifier[host_flags] , identifier[expr] )
keyword[if] identifier[match] keyword[is] keyword[None] :
keyword[return] [ identifier[filter_host_by_name] ( identifier[expr] )]
identifier[flags] , identifier[expr] = identifier[match] . identifier[groups] ()
keyword[if] literal[string] keyword[in] identifier[flags] :
keyword[return] [ identifier[filter_host_by_group] ( identifier[expr] )]
keyword[if] literal[string] keyword[in] identifier[flags] :
keyword[return] [ identifier[filter_host_by_regex] ( identifier[expr] )]
keyword[if] literal[string] keyword[in] identifier[flags] :
keyword[return] [ identifier[filter_host_by_bp_rule_label] ( identifier[expr] )]
keyword[if] literal[string] keyword[in] identifier[flags] :
keyword[return] [ identifier[filter_host_by_tag] ( identifier[expr] )]
keyword[return] [ identifier[filter_none] ] | def get_host_filters(self, expr):
# pylint: disable=too-many-return-statements
"Generates host filter list corresponding to the expression ::\n\n * '*' => any\n * 'g' => group filter\n * 'r' => regex name filter\n * 'l' => bp rule label filter\n * 't' => tag filter\n * '' => none filter\n * No flag match => host name filter\n\n :param expr: expression to parse\n :type expr: str\n :return: filter list\n :rtype: list\n "
if expr == '*':
return [filter_any] # depends on [control=['if'], data=[]]
match = re.search('^([%s]+):(.*)' % self.host_flags, expr)
if match is None:
return [filter_host_by_name(expr)] # depends on [control=['if'], data=[]]
(flags, expr) = match.groups()
if 'g' in flags:
return [filter_host_by_group(expr)] # depends on [control=['if'], data=[]]
if 'r' in flags:
return [filter_host_by_regex(expr)] # depends on [control=['if'], data=[]]
if 'l' in flags:
return [filter_host_by_bp_rule_label(expr)] # depends on [control=['if'], data=[]]
if 't' in flags:
return [filter_host_by_tag(expr)] # depends on [control=['if'], data=[]]
return [filter_none] |
def _clearPrices(self):
    """ Clears prices according to auction type.

    Sets ``clearedPrice`` on every offer and bid:
    DISCRIMINATIVE settles each participant at its own bid/offer price
    (pay-as-bid), FIRST_PRICE settles at ``lmbda`` (presumably the
    nodal marginal price — confirm against the solver that sets it).

    Raises:
        ValueError: If ``auctionType`` is neither DISCRIMINATIVE nor
            FIRST_PRICE.
    """
    for offbid in self.offers + self.bids:
        if self.auctionType == DISCRIMINATIVE:
            offbid.clearedPrice = offbid.price
        elif self.auctionType == FIRST_PRICE:
            offbid.clearedPrice = offbid.lmbda
        else:
            # Previously raised a bare ValueError with no context.
            raise ValueError(
                "Unsupported auction type: %r" % self.auctionType)
constant[ Clears prices according to auction type.
]
for taget[name[offbid]] in starred[binary_operation[name[self].offers + name[self].bids]] begin[:]
if compare[name[self].auctionType equal[==] name[DISCRIMINATIVE]] begin[:]
name[offbid].clearedPrice assign[=] name[offbid].price | keyword[def] identifier[_clearPrices] ( identifier[self] ):
literal[string]
keyword[for] identifier[offbid] keyword[in] identifier[self] . identifier[offers] + identifier[self] . identifier[bids] :
keyword[if] identifier[self] . identifier[auctionType] == identifier[DISCRIMINATIVE] :
identifier[offbid] . identifier[clearedPrice] = identifier[offbid] . identifier[price]
keyword[elif] identifier[self] . identifier[auctionType] == identifier[FIRST_PRICE] :
identifier[offbid] . identifier[clearedPrice] = identifier[offbid] . identifier[lmbda]
keyword[else] :
keyword[raise] identifier[ValueError] | def _clearPrices(self):
""" Clears prices according to auction type.
"""
for offbid in self.offers + self.bids:
if self.auctionType == DISCRIMINATIVE:
offbid.clearedPrice = offbid.price # depends on [control=['if'], data=[]]
elif self.auctionType == FIRST_PRICE:
offbid.clearedPrice = offbid.lmbda # depends on [control=['if'], data=[]]
else:
raise ValueError # depends on [control=['for'], data=['offbid']] |
def _get_distance_scaling(self, C, mag, rhypo):
"""
Returns the distance scalig term
"""
return (C["a3"] * np.log(rhypo)) + (C["a4"] + C["a5"] * mag) * rhypo | def function[_get_distance_scaling, parameter[self, C, mag, rhypo]]:
constant[
Returns the distance scalig term
]
return[binary_operation[binary_operation[call[name[C]][constant[a3]] * call[name[np].log, parameter[name[rhypo]]]] + binary_operation[binary_operation[call[name[C]][constant[a4]] + binary_operation[call[name[C]][constant[a5]] * name[mag]]] * name[rhypo]]]] | keyword[def] identifier[_get_distance_scaling] ( identifier[self] , identifier[C] , identifier[mag] , identifier[rhypo] ):
literal[string]
keyword[return] ( identifier[C] [ literal[string] ]* identifier[np] . identifier[log] ( identifier[rhypo] ))+( identifier[C] [ literal[string] ]+ identifier[C] [ literal[string] ]* identifier[mag] )* identifier[rhypo] | def _get_distance_scaling(self, C, mag, rhypo):
"""
Returns the distance scalig term
"""
return C['a3'] * np.log(rhypo) + (C['a4'] + C['a5'] * mag) * rhypo |
def get_user_info(self, recipient_id, fields=None):
    """Fetch profile information about a user.

    https://developers.facebook.com/docs/messenger-platform/user-profile

    Input:
      recipient_id: id of the user whose profile is requested
      fields: optional list/tuple of profile field names to request
    Output:
      Decoded JSON response as <dict>, or None on a non-200 reply.
    """
    # Build the query: optional field selection first, then the auth
    # arguments (auth deliberately wins on any key collision, matching
    # the original update order).
    params = {}
    if isinstance(fields, (list, tuple)):
        params['fields'] = ",".join(fields)
    params.update(self.auth_args)

    endpoint = '{0}/{1}'.format(self.graph_url, recipient_id)
    resp = requests.get(endpoint, params=params)
    return resp.json() if resp.status_code == 200 else None
constant[Getting information about the user
https://developers.facebook.com/docs/messenger-platform/user-profile
Input:
recipient_id: recipient id to send to
Output:
Response from API as <dict>
]
variable[params] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b20b5fc0> begin[:]
call[name[params]][constant[fields]] assign[=] call[constant[,].join, parameter[name[fields]]]
call[name[params].update, parameter[name[self].auth_args]]
variable[request_endpoint] assign[=] call[constant[{0}/{1}].format, parameter[name[self].graph_url, name[recipient_id]]]
variable[response] assign[=] call[name[requests].get, parameter[name[request_endpoint]]]
if compare[name[response].status_code equal[==] constant[200]] begin[:]
return[call[name[response].json, parameter[]]]
return[constant[None]] | keyword[def] identifier[get_user_info] ( identifier[self] , identifier[recipient_id] , identifier[fields] = keyword[None] ):
literal[string]
identifier[params] ={}
keyword[if] identifier[fields] keyword[is] keyword[not] keyword[None] keyword[and] identifier[isinstance] ( identifier[fields] ,( identifier[list] , identifier[tuple] )):
identifier[params] [ literal[string] ]= literal[string] . identifier[join] ( identifier[fields] )
identifier[params] . identifier[update] ( identifier[self] . identifier[auth_args] )
identifier[request_endpoint] = literal[string] . identifier[format] ( identifier[self] . identifier[graph_url] , identifier[recipient_id] )
identifier[response] = identifier[requests] . identifier[get] ( identifier[request_endpoint] , identifier[params] = identifier[params] )
keyword[if] identifier[response] . identifier[status_code] == literal[int] :
keyword[return] identifier[response] . identifier[json] ()
keyword[return] keyword[None] | def get_user_info(self, recipient_id, fields=None):
"""Getting information about the user
https://developers.facebook.com/docs/messenger-platform/user-profile
Input:
recipient_id: recipient id to send to
Output:
Response from API as <dict>
"""
params = {}
if fields is not None and isinstance(fields, (list, tuple)):
params['fields'] = ','.join(fields) # depends on [control=['if'], data=[]]
params.update(self.auth_args)
request_endpoint = '{0}/{1}'.format(self.graph_url, recipient_id)
response = requests.get(request_endpoint, params=params)
if response.status_code == 200:
return response.json() # depends on [control=['if'], data=[]]
return None |
def get_external_store(self, project_name, store_name):
    """ get the external store meta info

    Unsuccessful opertaion will cause an LogException.

    :type project_name: string
    :param project_name: the Project name

    :type store_name: string
    :param store_name: the external store name

    :return: GetExternalStoreResponse

    :raise: LogException
    """
    resource = "/externalstores/" + store_name
    (resp, header) = self._send("GET", project_name, None, resource, {}, {})

    # Make sure the store name is present in the payload for callers.
    resp.setdefault('externalStoreName', store_name)
    return GetExternalStoreResponse(resp, header)
constant[ get the logstore meta info
Unsuccessful opertaion will cause an LogException.
:type project_name: string
:param project_name: the Project name
:type store_name: string
:param store_name: the logstore name
:return: GetLogStoreResponse
:raise: LogException
]
variable[headers] assign[=] dictionary[[], []]
variable[params] assign[=] dictionary[[], []]
variable[resource] assign[=] binary_operation[constant[/externalstores/] + name[store_name]]
<ast.Tuple object at 0x7da1b088a320> assign[=] call[name[self]._send, parameter[constant[GET], name[project_name], constant[None], name[resource], name[params], name[headers]]]
if compare[constant[externalStoreName] <ast.NotIn object at 0x7da2590d7190> name[resp]] begin[:]
call[name[resp]][constant[externalStoreName]] assign[=] name[store_name]
return[call[name[GetExternalStoreResponse], parameter[name[resp], name[header]]]] | keyword[def] identifier[get_external_store] ( identifier[self] , identifier[project_name] , identifier[store_name] ):
literal[string]
identifier[headers] ={}
identifier[params] ={}
identifier[resource] = literal[string] + identifier[store_name]
( identifier[resp] , identifier[header] )= identifier[self] . identifier[_send] ( literal[string] , identifier[project_name] , keyword[None] , identifier[resource] , identifier[params] , identifier[headers] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[resp] :
identifier[resp] [ literal[string] ]= identifier[store_name]
keyword[return] identifier[GetExternalStoreResponse] ( identifier[resp] , identifier[header] ) | def get_external_store(self, project_name, store_name):
""" get the logstore meta info
Unsuccessful opertaion will cause an LogException.
:type project_name: string
:param project_name: the Project name
:type store_name: string
:param store_name: the logstore name
:return: GetLogStoreResponse
:raise: LogException
"""
headers = {}
params = {}
resource = '/externalstores/' + store_name
(resp, header) = self._send('GET', project_name, None, resource, params, headers) # add storeName if not existing
if 'externalStoreName' not in resp:
resp['externalStoreName'] = store_name # depends on [control=['if'], data=['resp']]
return GetExternalStoreResponse(resp, header) |
def process(self):
    """Process the files and collect necessary data.

    Runs three passes over ``self.filename``: relations, then the nodes
    and ways those relations reference. Missing member nodes are logged
    but do not abort processing.
    """
    # Pass 1: extract public-transport relations.
    self.rh = RelationHandler()
    self.rh.apply_file(self.filename)
    logging.debug('Found %d public transport relations.', len(self.rh.relations))

    # Collect ids of interest referenced by those relations.
    node_ids, stop_node_ids, way_ids, reverse_map = self.__collect_ids()

    # Pass 2: extract nodes.
    self.nh = NodeHandler(node_ids)
    self.nh.apply_file(self.filename, locations=True)
    # One warning per missing node (previously tracked with a redundant
    # manual counter and an unused enumerate index).
    for missing_node_id in self.nh.missing_node_ids:
        logging.warning(
            '[no data] missing stop node. rel: https://osm.org/relation/%s node: https://osm.org/node/%s.',
            reverse_map[missing_node_id], missing_node_id)
    count = len(self.nh.missing_node_ids)
    if count:
        logging.warning(
            '%d nodes that appear in relations are missing.',
            count)
    else:
        logging.debug('Lucky you! All relation member nodes were found.')

    # Pass 3: extract ways.
    self.wh = WayHandler(way_ids)
    self.wh.apply_file(self.filename, locations=True)
constant[Process the files and collect necessary data.]
name[self].rh assign[=] call[name[RelationHandler], parameter[]]
call[name[self].rh.apply_file, parameter[name[self].filename]]
call[name[logging].debug, parameter[constant[Found %d public transport relations.], call[name[len], parameter[name[self].rh.relations]]]]
<ast.Tuple object at 0x7da1b032d2a0> assign[=] call[name[self].__collect_ids, parameter[]]
name[self].nh assign[=] call[name[NodeHandler], parameter[name[node_ids]]]
call[name[self].nh.apply_file, parameter[name[self].filename]]
variable[count] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b032d6f0>, <ast.Name object at 0x7da1b032e9b0>]]] in starred[call[name[enumerate], parameter[name[self].nh.missing_node_ids]]] begin[:]
<ast.AugAssign object at 0x7da1b032f4c0>
call[name[logging].warning, parameter[constant[[no data] missing stop node. rel: https://osm.org/relation/%s node: https://osm.org/node/%s.], call[name[reverse_map]][name[missing_node_id]], name[missing_node_id]]]
if name[count] begin[:]
call[name[logging].warning, parameter[constant[%d nodes that appear in relations are missing.], name[count]]]
name[self].wh assign[=] call[name[WayHandler], parameter[name[way_ids]]]
call[name[self].wh.apply_file, parameter[name[self].filename]] | keyword[def] identifier[process] ( identifier[self] ):
literal[string]
identifier[self] . identifier[rh] = identifier[RelationHandler] ()
identifier[self] . identifier[rh] . identifier[apply_file] ( identifier[self] . identifier[filename] )
identifier[logging] . identifier[debug] ( literal[string] , identifier[len] ( identifier[self] . identifier[rh] . identifier[relations] ))
identifier[node_ids] , identifier[stop_node_ids] , identifier[way_ids] , identifier[reverse_map] = identifier[self] . identifier[__collect_ids] ()
identifier[self] . identifier[nh] = identifier[NodeHandler] ( identifier[node_ids] )
identifier[self] . identifier[nh] . identifier[apply_file] ( identifier[self] . identifier[filename] , identifier[locations] = keyword[True] )
identifier[count] = literal[int]
keyword[for] identifier[idx] , identifier[missing_node_id] keyword[in] identifier[enumerate] ( identifier[self] . identifier[nh] . identifier[missing_node_ids] ):
identifier[count] += literal[int]
identifier[logging] . identifier[warning] (
literal[string] ,
identifier[reverse_map] [ identifier[missing_node_id] ], identifier[missing_node_id] )
keyword[if] identifier[count] :
identifier[logging] . identifier[warning] (
literal[string] ,
identifier[count] )
keyword[else] :
identifier[logging] . identifier[debug] ( literal[string] )
identifier[self] . identifier[wh] = identifier[WayHandler] ( identifier[way_ids] )
identifier[self] . identifier[wh] . identifier[apply_file] ( identifier[self] . identifier[filename] , identifier[locations] = keyword[True] ) | def process(self):
"""Process the files and collect necessary data."""
# Extract relations
self.rh = RelationHandler()
self.rh.apply_file(self.filename)
logging.debug('Found %d public transport relations.', len(self.rh.relations))
# Collect ids of interest
(node_ids, stop_node_ids, way_ids, reverse_map) = self.__collect_ids()
# Extract nodes
self.nh = NodeHandler(node_ids)
self.nh.apply_file(self.filename, locations=True)
count = 0
for (idx, missing_node_id) in enumerate(self.nh.missing_node_ids):
count += 1
logging.warning('[no data] missing stop node. rel: https://osm.org/relation/%s node: https://osm.org/node/%s.', reverse_map[missing_node_id], missing_node_id) # depends on [control=['for'], data=[]]
if count:
logging.warning('%d nodes that appear in relations are missing.', count) # depends on [control=['if'], data=[]]
else:
logging.debug('Lucky you! All relation member nodes were found.')
# Extract ways
self.wh = WayHandler(way_ids)
self.wh.apply_file(self.filename, locations=True) |
def _check_perpendicular_r2_axis(self, axis):
    """
    Checks for R2 axes perpendicular to unique axis. For handling
    symmetric top molecules.

    :param axis: the unique axis (3-vector); candidate C2 axes are
        constructed perpendicular to it.
    :return: True as soon as one perpendicular C2 axis is found (the
        operation is appended to self.symmops and (axis, 2) to
        self.rot_sym); False if none of the candidates is a valid
        symmetry operation.
    """
    min_set = self._get_smallest_set_not_on_axis(axis)
    for s1, s2 in itertools.combinations(min_set, 2):
        # Candidate C2 axis: perpendicular both to the unique axis and
        # to the line connecting the two sites.
        test_axis = np.cross(s1.coords - s2.coords, axis)
        # Skip near-zero cross products (sites collinear with the axis).
        if np.linalg.norm(test_axis) > self.tol:
            op = SymmOp.from_axis_angle_and_translation(test_axis, 180)
            if self.is_valid_op(op):
                self.symmops.append(op)
                self.rot_sym.append((test_axis, 2))
                return True
    # Previously fell through returning None implicitly; False is
    # equivalent for all truthiness-based callers but explicit.
    return False
constant[
Checks for R2 axes perpendicular to unique axis. For handling
symmetric top molecules.
]
variable[min_set] assign[=] call[name[self]._get_smallest_set_not_on_axis, parameter[name[axis]]]
for taget[tuple[[<ast.Name object at 0x7da1b1c5a560>, <ast.Name object at 0x7da1b1c58df0>]]] in starred[call[name[itertools].combinations, parameter[name[min_set], constant[2]]]] begin[:]
variable[test_axis] assign[=] call[name[np].cross, parameter[binary_operation[name[s1].coords - name[s2].coords], name[axis]]]
if compare[call[name[np].linalg.norm, parameter[name[test_axis]]] greater[>] name[self].tol] begin[:]
variable[op] assign[=] call[name[SymmOp].from_axis_angle_and_translation, parameter[name[test_axis], constant[180]]]
variable[r2present] assign[=] call[name[self].is_valid_op, parameter[name[op]]]
if name[r2present] begin[:]
call[name[self].symmops.append, parameter[name[op]]]
call[name[self].rot_sym.append, parameter[tuple[[<ast.Name object at 0x7da1b1c5b160>, <ast.Constant object at 0x7da1b1c5b010>]]]]
return[constant[True]] | keyword[def] identifier[_check_perpendicular_r2_axis] ( identifier[self] , identifier[axis] ):
literal[string]
identifier[min_set] = identifier[self] . identifier[_get_smallest_set_not_on_axis] ( identifier[axis] )
keyword[for] identifier[s1] , identifier[s2] keyword[in] identifier[itertools] . identifier[combinations] ( identifier[min_set] , literal[int] ):
identifier[test_axis] = identifier[np] . identifier[cross] ( identifier[s1] . identifier[coords] - identifier[s2] . identifier[coords] , identifier[axis] )
keyword[if] identifier[np] . identifier[linalg] . identifier[norm] ( identifier[test_axis] )> identifier[self] . identifier[tol] :
identifier[op] = identifier[SymmOp] . identifier[from_axis_angle_and_translation] ( identifier[test_axis] , literal[int] )
identifier[r2present] = identifier[self] . identifier[is_valid_op] ( identifier[op] )
keyword[if] identifier[r2present] :
identifier[self] . identifier[symmops] . identifier[append] ( identifier[op] )
identifier[self] . identifier[rot_sym] . identifier[append] (( identifier[test_axis] , literal[int] ))
keyword[return] keyword[True] | def _check_perpendicular_r2_axis(self, axis):
"""
Checks for R2 axes perpendicular to unique axis. For handling
symmetric top molecules.
"""
min_set = self._get_smallest_set_not_on_axis(axis)
for (s1, s2) in itertools.combinations(min_set, 2):
test_axis = np.cross(s1.coords - s2.coords, axis)
if np.linalg.norm(test_axis) > self.tol:
op = SymmOp.from_axis_angle_and_translation(test_axis, 180)
r2present = self.is_valid_op(op)
if r2present:
self.symmops.append(op)
self.rot_sym.append((test_axis, 2))
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def downsample(work_dir, sample_name, fastq_left_fpath, fastq_right_fpath, downsample_to, num_pairs=None):
    """ get N random headers from a fastq file without reading the
    whole thing into memory
    modified from: http://www.biostars.org/p/6544/

    :param work_dir: directory where the downsampled fastq files are written
    :param sample_name: label used in log messages; when falsy it is derived
        from the common characters of the two fastq paths
    :param fastq_left_fpath: left (R1) fastq path, possibly gzipped
    :param fastq_right_fpath: right (R2) fastq path, or None for single-end data
    :param downsample_to: float - fraction of read pairs to keep;
        int - absolute number of read pairs to keep
    :param num_pairs: pre-computed read-pair count; counted from the left
        fastq when None
    :return: (left, right) paths of the downsampled fastqs, or the original
        input paths when no downsampling is necessary
    """
    sample_name = sample_name or splitext(''.join(lc if lc == rc else '' for lc, rc in zip(fastq_left_fpath, fastq_right_fpath)))[0]

    l_out_fpath = make_downsampled_fpath(work_dir, fastq_left_fpath)
    r_out_fpath = make_downsampled_fpath(work_dir, fastq_right_fpath)
    if can_reuse(l_out_fpath, [fastq_left_fpath, fastq_right_fpath]):
        return l_out_fpath, r_out_fpath

    info('Processing ' + sample_name)
    if num_pairs is None:
        info(sample_name + ': counting number of reads in fastq...')
        num_pairs = _count_records_in_fastq(fastq_left_fpath)
    if num_pairs > LIMIT:
        info(sample_name + ' the number of reads is higher than ' + str(LIMIT) +
             ', sampling from only first ' + str(LIMIT))
        num_pairs = LIMIT
    info(sample_name + ': ' + str(num_pairs) + ' reads')
    num_downsample_pairs = int(downsample_to * num_pairs) if isinstance(downsample_to, float) else downsample_to
    if num_pairs <= num_downsample_pairs:
        info(sample_name + ': and it is less than ' + str(num_downsample_pairs) + ', so no downsampling.')
        return fastq_left_fpath, fastq_right_fpath
    else:
        info(sample_name + ': downsampling to ' + str(num_downsample_pairs))

    # Sorted indices of the records to keep, so the input can be streamed once.
    rand_records = sorted(random.sample(range(num_pairs), num_downsample_pairs))

    info('Opening ' + fastq_left_fpath)
    fh1 = open_gzipsafe(fastq_left_fpath)
    info('Opening ' + fastq_right_fpath)
    fh2 = open_gzipsafe(fastq_right_fpath) if fastq_right_fpath else None
    out_files = (l_out_fpath, r_out_fpath) if r_out_fpath else (l_out_fpath,)

    written_records = 0
    with file_transaction(work_dir, out_files) as tx_out_files:
        if isinstance(tx_out_files, six.string_types):
            # Single output file: file_transaction returned a bare path.
            # BUGFIX: tx_out_f2 was previously left unassigned here, causing
            # a NameError on the unconditional logging/open below.
            tx_out_f1, tx_out_f2 = tx_out_files, None
        else:
            tx_out_f1, tx_out_f2 = tx_out_files
        info('Opening ' + str(tx_out_f1) + ' to write')
        sub1 = open_gzipsafe(tx_out_f1, "w")
        sub2 = None
        if r_out_fpath:
            info('Opening ' + str(tx_out_f2) + ' to write')
            sub2 = open_gzipsafe(tx_out_f2, "w")
        rec_no = -1
        for rr in rand_records:
            # Skip forward to the next sampled record (4 lines per record).
            while rec_no < rr:
                rec_no += 1
                for i in range(4): fh1.readline()
                if fh2:
                    for i in range(4): fh2.readline()
            for i in range(4):
                sub1.write(fh1.readline())
                if sub2:
                    sub2.write(fh2.readline())
            written_records += 1
            if written_records % 10000 == 0:
                info(sample_name + ': written ' + str(written_records) + ', rec_no ' + str(rec_no + 1))
            if rec_no > num_pairs:
                # BUGFIX: the message tail was passed as a second positional
                # argument to info() instead of being concatenated.
                info(sample_name + ' reached the limit of ' + str(num_pairs) + ' read lines, stopping.')
                break
        info(sample_name + ': done, written ' + str(written_records) + ', rec_no ' + str(rec_no))
    fh1.close()
    sub1.close()
    if fastq_right_fpath:
        fh2.close()
        sub2.close()
    info(sample_name + ': done downsampling, saved to ' + l_out_fpath + ' and ' + r_out_fpath + ', total ' + str(written_records) + ' paired reads written')
    return l_out_fpath, r_out_fpath
constant[ get N random headers from a fastq file without reading the
whole thing into memory
modified from: http://www.biostars.org/p/6544/
]
variable[sample_name] assign[=] <ast.BoolOp object at 0x7da18c4cf8b0>
variable[l_out_fpath] assign[=] call[name[make_downsampled_fpath], parameter[name[work_dir], name[fastq_left_fpath]]]
variable[r_out_fpath] assign[=] call[name[make_downsampled_fpath], parameter[name[work_dir], name[fastq_right_fpath]]]
if call[name[can_reuse], parameter[name[l_out_fpath], list[[<ast.Name object at 0x7da18c4cf670>, <ast.Name object at 0x7da18c4cdff0>]]]] begin[:]
return[tuple[[<ast.Name object at 0x7da18c4ce4a0>, <ast.Name object at 0x7da18c4cf610>]]]
call[name[info], parameter[binary_operation[constant[Processing ] + name[sample_name]]]]
if compare[name[num_pairs] is constant[None]] begin[:]
call[name[info], parameter[binary_operation[name[sample_name] + constant[: counting number of reads in fastq...]]]]
variable[num_pairs] assign[=] call[name[_count_records_in_fastq], parameter[name[fastq_left_fpath]]]
if compare[name[num_pairs] greater[>] name[LIMIT]] begin[:]
call[name[info], parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[sample_name] + constant[ the number of reads is higher than ]] + call[name[str], parameter[name[LIMIT]]]] + constant[, sampling from only first ]] + call[name[str], parameter[name[LIMIT]]]]]]
variable[num_pairs] assign[=] name[LIMIT]
call[name[info], parameter[binary_operation[binary_operation[binary_operation[name[sample_name] + constant[: ]] + call[name[str], parameter[name[num_pairs]]]] + constant[ reads]]]]
variable[num_downsample_pairs] assign[=] <ast.IfExp object at 0x7da18c4ccee0>
if compare[name[num_pairs] less_or_equal[<=] name[num_downsample_pairs]] begin[:]
call[name[info], parameter[binary_operation[binary_operation[binary_operation[name[sample_name] + constant[: and it is less than ]] + call[name[str], parameter[name[num_downsample_pairs]]]] + constant[, so no downsampling.]]]]
return[tuple[[<ast.Name object at 0x7da18c4ce650>, <ast.Name object at 0x7da18c4cd540>]]]
call[name[info], parameter[binary_operation[constant[Opening ] + name[fastq_left_fpath]]]]
variable[fh1] assign[=] call[name[open_gzipsafe], parameter[name[fastq_left_fpath]]]
call[name[info], parameter[binary_operation[constant[Opening ] + name[fastq_right_fpath]]]]
variable[fh2] assign[=] <ast.IfExp object at 0x7da18c4ce950>
variable[out_files] assign[=] <ast.IfExp object at 0x7da18c4ce140>
variable[written_records] assign[=] constant[0]
with call[name[file_transaction], parameter[name[work_dir], name[out_files]]] begin[:]
if call[name[isinstance], parameter[name[tx_out_files], name[six].string_types]] begin[:]
variable[tx_out_f1] assign[=] name[tx_out_files]
call[name[info], parameter[binary_operation[binary_operation[constant[Opening ] + call[name[str], parameter[name[tx_out_f1]]]] + constant[ to write]]]]
variable[sub1] assign[=] call[name[open_gzipsafe], parameter[name[tx_out_f1], constant[w]]]
call[name[info], parameter[binary_operation[binary_operation[constant[Opening ] + call[name[str], parameter[name[tx_out_f2]]]] + constant[ to write]]]]
variable[sub2] assign[=] <ast.IfExp object at 0x7da18c4ced10>
variable[rec_no] assign[=] <ast.UnaryOp object at 0x7da18c4cc100>
for taget[name[rr]] in starred[name[rand_records]] begin[:]
while compare[name[rec_no] less[<] name[rr]] begin[:]
<ast.AugAssign object at 0x7da18c4cd840>
for taget[name[i]] in starred[call[name[range], parameter[constant[4]]]] begin[:]
call[name[fh1].readline, parameter[]]
if name[fh2] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[constant[4]]]] begin[:]
call[name[fh2].readline, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[4]]]] begin[:]
call[name[sub1].write, parameter[call[name[fh1].readline, parameter[]]]]
if name[sub2] begin[:]
call[name[sub2].write, parameter[call[name[fh2].readline, parameter[]]]]
<ast.AugAssign object at 0x7da18f00ece0>
if compare[binary_operation[name[written_records] <ast.Mod object at 0x7da2590d6920> constant[10000]] equal[==] constant[0]] begin[:]
call[name[info], parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[sample_name] + constant[: written ]] + call[name[str], parameter[name[written_records]]]] + constant[, rec_no ]] + call[name[str], parameter[binary_operation[name[rec_no] + constant[1]]]]]]]
if compare[name[rec_no] greater[>] name[num_pairs]] begin[:]
call[name[info], parameter[binary_operation[binary_operation[name[sample_name] + constant[ reached the limit of ]] + call[name[str], parameter[name[num_pairs]]]], constant[ read lines, stopping.]]]
break
call[name[info], parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[sample_name] + constant[: done, written ]] + call[name[str], parameter[name[written_records]]]] + constant[, rec_no ]] + call[name[str], parameter[name[rec_no]]]]]]
call[name[fh1].close, parameter[]]
call[name[sub1].close, parameter[]]
if name[fastq_right_fpath] begin[:]
call[name[fh2].close, parameter[]]
call[name[sub2].close, parameter[]]
call[name[info], parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[sample_name] + constant[: done downsampling, saved to ]] + name[l_out_fpath]] + constant[ and ]] + name[r_out_fpath]] + constant[, total ]] + call[name[str], parameter[name[written_records]]]] + constant[ paired reads written]]]]
return[tuple[[<ast.Name object at 0x7da1b2345000>, <ast.Name object at 0x7da1b23461a0>]]] | keyword[def] identifier[downsample] ( identifier[work_dir] , identifier[sample_name] , identifier[fastq_left_fpath] , identifier[fastq_right_fpath] , identifier[downsample_to] , identifier[num_pairs] = keyword[None] ):
literal[string]
identifier[sample_name] = identifier[sample_name] keyword[or] identifier[splitext] ( literal[string] . identifier[join] ( identifier[lc] keyword[if] identifier[lc] == identifier[rc] keyword[else] literal[string] keyword[for] identifier[lc] , identifier[rc] keyword[in] identifier[zip] ( identifier[fastq_left_fpath] , identifier[fastq_right_fpath] )))[ literal[int] ]
identifier[l_out_fpath] = identifier[make_downsampled_fpath] ( identifier[work_dir] , identifier[fastq_left_fpath] )
identifier[r_out_fpath] = identifier[make_downsampled_fpath] ( identifier[work_dir] , identifier[fastq_right_fpath] )
keyword[if] identifier[can_reuse] ( identifier[l_out_fpath] ,[ identifier[fastq_left_fpath] , identifier[fastq_right_fpath] ]):
keyword[return] identifier[l_out_fpath] , identifier[r_out_fpath]
identifier[info] ( literal[string] + identifier[sample_name] )
keyword[if] identifier[num_pairs] keyword[is] keyword[None] :
identifier[info] ( identifier[sample_name] + literal[string] )
identifier[num_pairs] = identifier[_count_records_in_fastq] ( identifier[fastq_left_fpath] )
keyword[if] identifier[num_pairs] > identifier[LIMIT] :
identifier[info] ( identifier[sample_name] + literal[string] + identifier[str] ( identifier[LIMIT] )+
literal[string] + identifier[str] ( identifier[LIMIT] ))
identifier[num_pairs] = identifier[LIMIT]
identifier[info] ( identifier[sample_name] + literal[string] + identifier[str] ( identifier[num_pairs] )+ literal[string] )
identifier[num_downsample_pairs] = identifier[int] ( identifier[downsample_to] * identifier[num_pairs] ) keyword[if] identifier[isinstance] ( identifier[downsample_to] , identifier[float] ) keyword[else] identifier[downsample_to]
keyword[if] identifier[num_pairs] <= identifier[num_downsample_pairs] :
identifier[info] ( identifier[sample_name] + literal[string] + identifier[str] ( identifier[num_downsample_pairs] )+ literal[string] )
keyword[return] identifier[fastq_left_fpath] , identifier[fastq_right_fpath]
keyword[else] :
identifier[info] ( identifier[sample_name] + literal[string] + identifier[str] ( identifier[num_downsample_pairs] ))
identifier[rand_records] = identifier[sorted] ( identifier[random] . identifier[sample] ( identifier[range] ( identifier[num_pairs] ), identifier[num_downsample_pairs] ))
identifier[info] ( literal[string] + identifier[fastq_left_fpath] )
identifier[fh1] = identifier[open_gzipsafe] ( identifier[fastq_left_fpath] )
identifier[info] ( literal[string] + identifier[fastq_right_fpath] )
identifier[fh2] = identifier[open_gzipsafe] ( identifier[fastq_right_fpath] ) keyword[if] identifier[fastq_right_fpath] keyword[else] keyword[None]
identifier[out_files] =( identifier[l_out_fpath] , identifier[r_out_fpath] ) keyword[if] identifier[r_out_fpath] keyword[else] ( identifier[l_out_fpath] ,)
identifier[written_records] = literal[int]
keyword[with] identifier[file_transaction] ( identifier[work_dir] , identifier[out_files] ) keyword[as] identifier[tx_out_files] :
keyword[if] identifier[isinstance] ( identifier[tx_out_files] , identifier[six] . identifier[string_types] ):
identifier[tx_out_f1] = identifier[tx_out_files]
keyword[else] :
identifier[tx_out_f1] , identifier[tx_out_f2] = identifier[tx_out_files]
identifier[info] ( literal[string] + identifier[str] ( identifier[tx_out_f1] )+ literal[string] )
identifier[sub1] = identifier[open_gzipsafe] ( identifier[tx_out_f1] , literal[string] )
identifier[info] ( literal[string] + identifier[str] ( identifier[tx_out_f2] )+ literal[string] )
identifier[sub2] = identifier[open_gzipsafe] ( identifier[tx_out_f2] , literal[string] ) keyword[if] identifier[r_out_fpath] keyword[else] keyword[None]
identifier[rec_no] =- literal[int]
keyword[for] identifier[rr] keyword[in] identifier[rand_records] :
keyword[while] identifier[rec_no] < identifier[rr] :
identifier[rec_no] += literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ): identifier[fh1] . identifier[readline] ()
keyword[if] identifier[fh2] :
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ): identifier[fh2] . identifier[readline] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[sub1] . identifier[write] ( identifier[fh1] . identifier[readline] ())
keyword[if] identifier[sub2] :
identifier[sub2] . identifier[write] ( identifier[fh2] . identifier[readline] ())
identifier[written_records] += literal[int]
keyword[if] identifier[written_records] % literal[int] == literal[int] :
identifier[info] ( identifier[sample_name] + literal[string] + identifier[str] ( identifier[written_records] )+ literal[string] + identifier[str] ( identifier[rec_no] + literal[int] ))
keyword[if] identifier[rec_no] > identifier[num_pairs] :
identifier[info] ( identifier[sample_name] + literal[string] + identifier[str] ( identifier[num_pairs] ), literal[string] )
keyword[break]
identifier[info] ( identifier[sample_name] + literal[string] + identifier[str] ( identifier[written_records] )+ literal[string] + identifier[str] ( identifier[rec_no] ))
identifier[fh1] . identifier[close] ()
identifier[sub1] . identifier[close] ()
keyword[if] identifier[fastq_right_fpath] :
identifier[fh2] . identifier[close] ()
identifier[sub2] . identifier[close] ()
identifier[info] ( identifier[sample_name] + literal[string] + identifier[l_out_fpath] + literal[string] + identifier[r_out_fpath] + literal[string] + identifier[str] ( identifier[written_records] )+ literal[string] )
keyword[return] identifier[l_out_fpath] , identifier[r_out_fpath] | def downsample(work_dir, sample_name, fastq_left_fpath, fastq_right_fpath, downsample_to, num_pairs=None):
""" get N random headers from a fastq file without reading the
whole thing into memory
modified from: http://www.biostars.org/p/6544/
"""
sample_name = sample_name or splitext(''.join((lc if lc == rc else '' for (lc, rc) in zip(fastq_left_fpath, fastq_right_fpath))))[0]
l_out_fpath = make_downsampled_fpath(work_dir, fastq_left_fpath)
r_out_fpath = make_downsampled_fpath(work_dir, fastq_right_fpath)
if can_reuse(l_out_fpath, [fastq_left_fpath, fastq_right_fpath]):
return (l_out_fpath, r_out_fpath) # depends on [control=['if'], data=[]]
info('Processing ' + sample_name)
if num_pairs is None:
info(sample_name + ': counting number of reads in fastq...')
num_pairs = _count_records_in_fastq(fastq_left_fpath) # depends on [control=['if'], data=['num_pairs']]
if num_pairs > LIMIT:
info(sample_name + ' the number of reads is higher than ' + str(LIMIT) + ', sampling from only first ' + str(LIMIT))
num_pairs = LIMIT # depends on [control=['if'], data=['num_pairs', 'LIMIT']]
info(sample_name + ': ' + str(num_pairs) + ' reads')
num_downsample_pairs = int(downsample_to * num_pairs) if isinstance(downsample_to, float) else downsample_to
if num_pairs <= num_downsample_pairs:
info(sample_name + ': and it is less than ' + str(num_downsample_pairs) + ', so no downsampling.')
return (fastq_left_fpath, fastq_right_fpath) # depends on [control=['if'], data=['num_downsample_pairs']]
else:
info(sample_name + ': downsampling to ' + str(num_downsample_pairs))
rand_records = sorted(random.sample(range(num_pairs), num_downsample_pairs))
info('Opening ' + fastq_left_fpath)
fh1 = open_gzipsafe(fastq_left_fpath)
info('Opening ' + fastq_right_fpath)
fh2 = open_gzipsafe(fastq_right_fpath) if fastq_right_fpath else None
out_files = (l_out_fpath, r_out_fpath) if r_out_fpath else (l_out_fpath,)
written_records = 0
with file_transaction(work_dir, out_files) as tx_out_files:
if isinstance(tx_out_files, six.string_types):
tx_out_f1 = tx_out_files # depends on [control=['if'], data=[]]
else:
(tx_out_f1, tx_out_f2) = tx_out_files
info('Opening ' + str(tx_out_f1) + ' to write')
sub1 = open_gzipsafe(tx_out_f1, 'w')
info('Opening ' + str(tx_out_f2) + ' to write')
sub2 = open_gzipsafe(tx_out_f2, 'w') if r_out_fpath else None
rec_no = -1
for rr in rand_records:
while rec_no < rr:
rec_no += 1
for i in range(4):
fh1.readline() # depends on [control=['for'], data=[]]
if fh2:
for i in range(4):
fh2.readline() # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['rec_no']]
for i in range(4):
sub1.write(fh1.readline())
if sub2:
sub2.write(fh2.readline()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
written_records += 1
if written_records % 10000 == 0:
info(sample_name + ': written ' + str(written_records) + ', rec_no ' + str(rec_no + 1)) # depends on [control=['if'], data=[]]
if rec_no > num_pairs:
info(sample_name + ' reached the limit of ' + str(num_pairs), ' read lines, stopping.')
break # depends on [control=['if'], data=['num_pairs']] # depends on [control=['for'], data=['rr']]
info(sample_name + ': done, written ' + str(written_records) + ', rec_no ' + str(rec_no))
fh1.close()
sub1.close()
if fastq_right_fpath:
fh2.close()
sub2.close() # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['tx_out_files']]
info(sample_name + ': done downsampling, saved to ' + l_out_fpath + ' and ' + r_out_fpath + ', total ' + str(written_records) + ' paired reads written')
return (l_out_fpath, r_out_fpath) |
def _add_tc_script(self):
    """
    generates tc_script.sh and adds it to included files
    """
    # Render the script template with the configured traffic-control options.
    template_context = dict(tc_options=self.config.get('tc_options', []))
    rendered = self._render_template('tc_script.sh', template_context)
    # The file list might be missing entirely.
    self.config.setdefault('files', [])
    # Register tc_script.sh among the included files (deduplicated).
    self._add_unique_file({
        "path": "/tc_script.sh",
        "contents": rendered,
        "mode": "755",
    })
constant[
generates tc_script.sh and adds it to included files
]
variable[context] assign[=] call[name[dict], parameter[]]
variable[contents] assign[=] call[name[self]._render_template, parameter[constant[tc_script.sh], name[context]]]
call[name[self].config.setdefault, parameter[constant[files], list[[]]]]
call[name[self]._add_unique_file, parameter[dictionary[[<ast.Constant object at 0x7da1b009c8e0>, <ast.Constant object at 0x7da1b009c970>, <ast.Constant object at 0x7da1b009c9a0>], [<ast.Constant object at 0x7da1b009cac0>, <ast.Name object at 0x7da1b009c850>, <ast.Constant object at 0x7da1b009c880>]]]] | keyword[def] identifier[_add_tc_script] ( identifier[self] ):
literal[string]
identifier[context] = identifier[dict] ( identifier[tc_options] = identifier[self] . identifier[config] . identifier[get] ( literal[string] ,[]))
identifier[contents] = identifier[self] . identifier[_render_template] ( literal[string] , identifier[context] )
identifier[self] . identifier[config] . identifier[setdefault] ( literal[string] ,[])
identifier[self] . identifier[_add_unique_file] ({
literal[string] : literal[string] ,
literal[string] : identifier[contents] ,
literal[string] : literal[string]
}) | def _add_tc_script(self):
"""
generates tc_script.sh and adds it to included files
"""
# fill context
context = dict(tc_options=self.config.get('tc_options', []))
# import pdb; pdb.set_trace()
contents = self._render_template('tc_script.sh', context)
self.config.setdefault('files', []) # file list might be empty
# add tc_script.sh to list of included files
self._add_unique_file({'path': '/tc_script.sh', 'contents': contents, 'mode': '755'}) |
def commit_api(api):
    """Commit to a particular API, and trigger ImportErrors on subsequent
    dangerous imports"""
    # Once committed to one binding, the competing binding(s) must not be
    # importable any more.
    forbidden = ('PyQt4', 'PyQt5') if api == QT_API_PYSIDE else ('PySide',)
    for module_name in forbidden:
        ID.forbid(module_name)
constant[Commit to a particular API, and trigger ImportErrors on subsequent
dangerous imports]
if compare[name[api] equal[==] name[QT_API_PYSIDE]] begin[:]
call[name[ID].forbid, parameter[constant[PyQt4]]]
call[name[ID].forbid, parameter[constant[PyQt5]]] | keyword[def] identifier[commit_api] ( identifier[api] ):
literal[string]
keyword[if] identifier[api] == identifier[QT_API_PYSIDE] :
identifier[ID] . identifier[forbid] ( literal[string] )
identifier[ID] . identifier[forbid] ( literal[string] )
keyword[else] :
identifier[ID] . identifier[forbid] ( literal[string] ) | def commit_api(api):
"""Commit to a particular API, and trigger ImportErrors on subsequent
dangerous imports"""
if api == QT_API_PYSIDE:
ID.forbid('PyQt4')
ID.forbid('PyQt5') # depends on [control=['if'], data=[]]
else:
ID.forbid('PySide') |
def _make_spec_file(self):
    """Generates the text of an RPM spec file.
    Returns:
      A list of strings containing the lines of text.
    """
    # bdist_rpm may be an old-style class, in which case super() cannot
    # be used to reach its _make_spec_file.
    if issubclass(BdistRPMCommand, object):
        base_spec_lines = super(BdistRPMCommand, self)._make_spec_file()
    else:
        base_spec_lines = bdist_rpm._make_spec_file(self)

    python_package = "python3" if sys.version_info[0] >= 3 else "python"

    description_lines = []
    summary_line = ""
    inside_description = False
    rewritten_lines = []
    for spec_line in base_spec_lines:
        if spec_line.startswith("Summary: "):
            # Remember the summary so it can be repeated for the sub-package.
            summary_line = spec_line
        elif spec_line.startswith("BuildRequires: "):
            spec_line = "BuildRequires: {0:s}-setuptools".format(python_package)
        elif spec_line.startswith("Requires: "):
            if python_package == "python3":
                spec_line = spec_line.replace("python", "python3")
        elif spec_line.startswith("%description"):
            inside_description = True
        elif spec_line.startswith("%files"):
            spec_line = "%files -f INSTALLED_FILES -n {0:s}-%{{name}}".format(
                python_package)
        elif spec_line.startswith("%prep"):
            inside_description = False

            # Emit the python-prefixed sub-package header before %prep,
            # reusing the summary and description collected above.
            rewritten_lines.append(
                "%package -n {0:s}-%{{name}}".format(python_package))
            rewritten_lines.append("{0:s}".format(summary_line))
            rewritten_lines.append("")
            rewritten_lines.append(
                "%description -n {0:s}-%{{name}}".format(python_package))
            rewritten_lines.extend(description_lines)
        elif inside_description:
            # Ignore leading white lines in the description.
            if not description_lines and not spec_line:
                continue
            description_lines.append(spec_line)

        rewritten_lines.append(spec_line)
    return rewritten_lines
constant[Generates the text of an RPM spec file.
Returns:
A list of strings containing the lines of text.
]
if call[name[issubclass], parameter[name[BdistRPMCommand], name[object]]] begin[:]
variable[spec_file] assign[=] call[call[name[super], parameter[name[BdistRPMCommand], name[self]]]._make_spec_file, parameter[]]
if compare[call[name[sys].version_info][constant[0]] less[<] constant[3]] begin[:]
variable[python_package] assign[=] constant[python]
variable[description] assign[=] list[[]]
variable[summary] assign[=] constant[]
variable[in_description] assign[=] constant[False]
variable[python_spec_file] assign[=] list[[]]
for taget[name[line]] in starred[name[spec_file]] begin[:]
if call[name[line].startswith, parameter[constant[Summary: ]]] begin[:]
variable[summary] assign[=] name[line]
call[name[python_spec_file].append, parameter[name[line]]]
return[name[python_spec_file]] | keyword[def] identifier[_make_spec_file] ( identifier[self] ):
literal[string]
keyword[if] identifier[issubclass] ( identifier[BdistRPMCommand] , identifier[object] ):
identifier[spec_file] = identifier[super] ( identifier[BdistRPMCommand] , identifier[self] ). identifier[_make_spec_file] ()
keyword[else] :
identifier[spec_file] = identifier[bdist_rpm] . identifier[_make_spec_file] ( identifier[self] )
keyword[if] identifier[sys] . identifier[version_info] [ literal[int] ]< literal[int] :
identifier[python_package] = literal[string]
keyword[else] :
identifier[python_package] = literal[string]
identifier[description] =[]
identifier[summary] = literal[string]
identifier[in_description] = keyword[False]
identifier[python_spec_file] =[]
keyword[for] identifier[line] keyword[in] identifier[spec_file] :
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[summary] = identifier[line]
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[line] = literal[string] . identifier[format] ( identifier[python_package] )
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[if] identifier[python_package] == literal[string] :
identifier[line] = identifier[line] . identifier[replace] ( literal[string] , literal[string] )
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[in_description] = keyword[True]
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[line] = literal[string] . identifier[format] (
identifier[python_package] )
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[in_description] = keyword[False]
identifier[python_spec_file] . identifier[append] (
literal[string] . identifier[format] ( identifier[python_package] ))
identifier[python_spec_file] . identifier[append] ( literal[string] . identifier[format] ( identifier[summary] ))
identifier[python_spec_file] . identifier[append] ( literal[string] )
identifier[python_spec_file] . identifier[append] (
literal[string] . identifier[format] ( identifier[python_package] ))
identifier[python_spec_file] . identifier[extend] ( identifier[description] )
keyword[elif] identifier[in_description] :
keyword[if] keyword[not] identifier[description] keyword[and] keyword[not] identifier[line] :
keyword[continue]
identifier[description] . identifier[append] ( identifier[line] )
identifier[python_spec_file] . identifier[append] ( identifier[line] )
keyword[return] identifier[python_spec_file] | def _make_spec_file(self):
"""Generates the text of an RPM spec file.
Returns:
A list of strings containing the lines of text.
"""
# Note that bdist_rpm can be an old style class.
if issubclass(BdistRPMCommand, object):
spec_file = super(BdistRPMCommand, self)._make_spec_file() # depends on [control=['if'], data=[]]
else:
spec_file = bdist_rpm._make_spec_file(self)
if sys.version_info[0] < 3:
python_package = 'python' # depends on [control=['if'], data=[]]
else:
python_package = 'python3'
description = []
summary = ''
in_description = False
python_spec_file = []
for line in spec_file:
if line.startswith('Summary: '):
summary = line # depends on [control=['if'], data=[]]
elif line.startswith('BuildRequires: '):
line = 'BuildRequires: {0:s}-setuptools'.format(python_package) # depends on [control=['if'], data=[]]
elif line.startswith('Requires: '):
if python_package == 'python3':
line = line.replace('python', 'python3') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif line.startswith('%description'):
in_description = True # depends on [control=['if'], data=[]]
elif line.startswith('%files'):
line = '%files -f INSTALLED_FILES -n {0:s}-%{{name}}'.format(python_package) # depends on [control=['if'], data=[]]
elif line.startswith('%prep'):
in_description = False
python_spec_file.append('%package -n {0:s}-%{{name}}'.format(python_package))
python_spec_file.append('{0:s}'.format(summary))
python_spec_file.append('')
python_spec_file.append('%description -n {0:s}-%{{name}}'.format(python_package))
python_spec_file.extend(description) # depends on [control=['if'], data=[]]
elif in_description:
# Ignore leading white lines in the description.
if not description and (not line):
continue # depends on [control=['if'], data=[]]
description.append(line) # depends on [control=['if'], data=[]]
python_spec_file.append(line) # depends on [control=['for'], data=['line']]
return python_spec_file |
def toggle(s):
    """
    Toggle back and forth between a name and a tuple representation.

    :param str s: either a textual color name, or a tuple-string: three
        comma-separated numbers, a ``0x``-prefixed value, or a
        ``#``-prefixed value
    :returns: the tuple representation if ``s`` was a text name; if it
        was a tuple-string that corresponds to a known text name, that
        name; otherwise the original tuple-string
    """
    # A string with commas or a hex prefix is a tuple-string, not a name.
    looks_numeric = ',' in s or s.startswith(('0x', '#'))
    color = name_to_color(s)
    if looks_numeric:
        return color_to_name(color)
    return str(color)
constant[
Toggle back and forth between a name and a tuple representation.
:param str s: a string which is either a text name, or a tuple-string:
a string with three numbers separated by commas
:returns: if the string was a text name, return a tuple. If it's a
tuple-string and it corresponds to a text name, return the text
name, else return the original tuple-string.
]
variable[is_numeric] assign[=] <ast.BoolOp object at 0x7da1b007c970>
variable[c] assign[=] call[name[name_to_color], parameter[name[s]]]
return[<ast.IfExp object at 0x7da1b007dbd0>] | keyword[def] identifier[toggle] ( identifier[s] ):
literal[string]
identifier[is_numeric] = literal[string] keyword[in] identifier[s] keyword[or] identifier[s] . identifier[startswith] ( literal[string] ) keyword[or] identifier[s] . identifier[startswith] ( literal[string] )
identifier[c] = identifier[name_to_color] ( identifier[s] )
keyword[return] identifier[color_to_name] ( identifier[c] ) keyword[if] identifier[is_numeric] keyword[else] identifier[str] ( identifier[c] ) | def toggle(s):
"""
Toggle back and forth between a name and a tuple representation.
:param str s: a string which is either a text name, or a tuple-string:
a string with three numbers separated by commas
:returns: if the string was a text name, return a tuple. If it's a
tuple-string and it corresponds to a text name, return the text
name, else return the original tuple-string.
"""
is_numeric = ',' in s or s.startswith('0x') or s.startswith('#')
c = name_to_color(s)
return color_to_name(c) if is_numeric else str(c) |
def draw_residual(x, y, yerr, xerr,
                  show_errbars=True, ax=None,
                  zero_line=True, grid=True,
                  **kwargs):
    """Draw a residual plot on the axis.

    When ``show_errbars`` is True, residuals are rendered as blue points
    with capless error bars; when it is False, they are rendered as a
    bar chart with black bars.

    **Arguments**
    - **x** array of numbers, x-coordinates
    - **y** array of numbers, y-coordinates
    - **yerr** array of numbers, the uncertainty on the y-values
    - **xerr** array of numbers, the uncertainty on the x-values
    - **show_errbars** If True, draw an errorbar plot, else a bar plot
    - **ax** Optional matplotlib axis instance on which to draw the plot
      (defaults to the current axis)
    - **zero_line** If True, draw a red line at :math:`y = 0` along the
      full extent in :math:`x`
    - **grid** If True, draw gridlines
    - **kwargs** passed to ``ax.errorbar`` (if ``show_errbars`` is True)
      or ``ax.bar`` (if ``show_errbars`` is False)

    **Returns**
    The matplotlib axis instance the plot was drawn on.
    """
    from matplotlib import pyplot as plt
    if ax is None:
        ax = plt.gca()
    if show_errbars:
        opts = {'fmt': 'b.', 'capsize': 0}
        opts.update(kwargs)
        ax.errorbar(x, y, yerr, xerr, zorder=0, **opts)
    else:
        opts = {'color': 'k'}
        opts.update(kwargs)
        ax.bar(x - xerr, y, width=2 * xerr, **opts)
    if zero_line:
        # Span the full x extent, including the outermost error bars.
        ax.plot([x[0] - xerr[0], x[-1] + xerr[-1]], [0, 0], 'r-', zorder=2)
    # Treat `grid` as 'add a grid if True'; calling ax.grid(False) would
    # turn off any grid already enabled on this axis, so guard the call.
    if grid:
        ax.grid(grid)
    return ax
constant[Draw a residual plot on the axis.
By default, if show_errbars if True, residuals are drawn as blue points
with errorbars with no endcaps. If show_errbars is False, residuals are
drawn as a bar graph with black bars.
**Arguments**
- **x** array of numbers, x-coordinates
- **y** array of numbers, y-coordinates
- **yerr** array of numbers, the uncertainty on the y-values
- **xerr** array of numbers, the uncertainty on the x-values
- **show_errbars** If True, draw the data as a bar plot, else as an
errorbar plot
- **ax** Optional matplotlib axis instance on which to draw the plot
- **zero_line** If True, draw a red line at :math:`y = 0` along the
full extent in :math:`x`
- **grid** If True, draw gridlines
- **kwargs** passed to ``ax.errorbar`` (if ``show_errbars`` is True) or
``ax.bar`` (if ``show_errbars`` if False)
**Returns**
The matplotlib axis instance the plot was drawn on.
]
from relative_module[matplotlib] import module[pyplot]
variable[ax] assign[=] <ast.IfExp object at 0x7da1b0f0e6b0>
if name[show_errbars] begin[:]
variable[plotopts] assign[=] call[name[dict], parameter[]]
call[name[plotopts].update, parameter[name[kwargs]]]
variable[pp] assign[=] call[name[ax].errorbar, parameter[name[x], name[y], name[yerr], name[xerr]]]
if name[zero_line] begin[:]
call[name[ax].plot, parameter[list[[<ast.BinOp object at 0x7da1b0f0d6c0>, <ast.BinOp object at 0x7da1b0f0f490>]], list[[<ast.Constant object at 0x7da1b0f0f190>, <ast.Constant object at 0x7da1b0f0d9f0>]], constant[r-]]]
if name[grid] begin[:]
call[name[ax].grid, parameter[name[grid]]]
return[name[ax]] | keyword[def] identifier[draw_residual] ( identifier[x] , identifier[y] , identifier[yerr] , identifier[xerr] ,
identifier[show_errbars] = keyword[True] , identifier[ax] = keyword[None] ,
identifier[zero_line] = keyword[True] , identifier[grid] = keyword[True] ,
** identifier[kwargs] ):
literal[string]
keyword[from] identifier[matplotlib] keyword[import] identifier[pyplot] keyword[as] identifier[plt]
identifier[ax] = identifier[plt] . identifier[gca] () keyword[if] identifier[ax] keyword[is] keyword[None] keyword[else] identifier[ax]
keyword[if] identifier[show_errbars] :
identifier[plotopts] = identifier[dict] ( identifier[fmt] = literal[string] , identifier[capsize] = literal[int] )
identifier[plotopts] . identifier[update] ( identifier[kwargs] )
identifier[pp] = identifier[ax] . identifier[errorbar] ( identifier[x] , identifier[y] , identifier[yerr] , identifier[xerr] , identifier[zorder] = literal[int] ,** identifier[plotopts] )
keyword[else] :
identifier[plotopts] = identifier[dict] ( identifier[color] = literal[string] )
identifier[plotopts] . identifier[update] ( identifier[kwargs] )
identifier[pp] = identifier[ax] . identifier[bar] ( identifier[x] - identifier[xerr] , identifier[y] , identifier[width] = literal[int] * identifier[xerr] ,** identifier[plotopts] )
keyword[if] identifier[zero_line] :
identifier[ax] . identifier[plot] ([ identifier[x] [ literal[int] ]- identifier[xerr] [ literal[int] ], identifier[x] [- literal[int] ]+ identifier[xerr] [- literal[int] ]],[ literal[int] , literal[int] ], literal[string] , identifier[zorder] = literal[int] )
keyword[if] identifier[grid] :
identifier[ax] . identifier[grid] ( identifier[grid] )
keyword[return] identifier[ax] | def draw_residual(x, y, yerr, xerr, show_errbars=True, ax=None, zero_line=True, grid=True, **kwargs):
"""Draw a residual plot on the axis.
By default, if show_errbars if True, residuals are drawn as blue points
with errorbars with no endcaps. If show_errbars is False, residuals are
drawn as a bar graph with black bars.
**Arguments**
- **x** array of numbers, x-coordinates
- **y** array of numbers, y-coordinates
- **yerr** array of numbers, the uncertainty on the y-values
- **xerr** array of numbers, the uncertainty on the x-values
- **show_errbars** If True, draw the data as a bar plot, else as an
errorbar plot
- **ax** Optional matplotlib axis instance on which to draw the plot
- **zero_line** If True, draw a red line at :math:`y = 0` along the
full extent in :math:`x`
- **grid** If True, draw gridlines
- **kwargs** passed to ``ax.errorbar`` (if ``show_errbars`` is True) or
``ax.bar`` (if ``show_errbars`` if False)
**Returns**
The matplotlib axis instance the plot was drawn on.
"""
from matplotlib import pyplot as plt
ax = plt.gca() if ax is None else ax
if show_errbars:
plotopts = dict(fmt='b.', capsize=0)
plotopts.update(kwargs)
pp = ax.errorbar(x, y, yerr, xerr, zorder=0, **plotopts) # depends on [control=['if'], data=[]]
else:
plotopts = dict(color='k')
plotopts.update(kwargs)
pp = ax.bar(x - xerr, y, width=2 * xerr, **plotopts)
if zero_line:
ax.plot([x[0] - xerr[0], x[-1] + xerr[-1]], [0, 0], 'r-', zorder=2) # depends on [control=['if'], data=[]]
# Take the `grid` kwarg to mean 'add a grid if True'; if grid is False and
# we called ax.grid(False) then any existing grid on ax would be turned off
if grid:
ax.grid(grid) # depends on [control=['if'], data=[]]
return ax |
def iflag_unique_items(list_):
    """
    Lazily flag the first occurrence of each item.

    Args:
        list_ (list): list of (hashable) items

    Returns:
        generator of booleans, True exactly when the corresponding item
        has not been seen earlier in the input
    """
    seen_items = set()

    def _first_occurrence(candidate):
        # Record the item on first sight; report whether it was new.
        if candidate not in seen_items:
            seen_items.add(candidate)
            return True
        return False

    return (_first_occurrence(candidate) for candidate in list_)
constant[
Returns a list of flags corresponding to the first time an item is seen
Args:
list_ (list): list of items
Returns:
flag_iter
]
variable[seen] assign[=] call[name[set], parameter[]]
def function[unseen, parameter[item]]:
if compare[name[item] in name[seen]] begin[:]
return[constant[False]]
call[name[seen].add, parameter[name[item]]]
return[constant[True]]
variable[flag_iter] assign[=] <ast.GeneratorExp object at 0x7da1b24e8370>
return[name[flag_iter]] | keyword[def] identifier[iflag_unique_items] ( identifier[list_] ):
literal[string]
identifier[seen] = identifier[set] ()
keyword[def] identifier[unseen] ( identifier[item] ):
keyword[if] identifier[item] keyword[in] identifier[seen] :
keyword[return] keyword[False]
identifier[seen] . identifier[add] ( identifier[item] )
keyword[return] keyword[True]
identifier[flag_iter] =( identifier[unseen] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[list_] )
keyword[return] identifier[flag_iter] | def iflag_unique_items(list_):
"""
Returns a list of flags corresponding to the first time an item is seen
Args:
list_ (list): list of items
Returns:
flag_iter
"""
seen = set()
def unseen(item):
if item in seen:
return False # depends on [control=['if'], data=[]]
seen.add(item)
return True
flag_iter = (unseen(item) for item in list_)
return flag_iter |
def format(self, info_dict, delimiter='/'):
    """
    Render a tree-shaped data structure as one root-to-leaf path per line.

    Each leaf value is printed together with the chain of keys that leads
    to it. For example::

        vm0:
            net: lago
            memory: 1024

    is rendered as::

        vm0/memory: 1024
        vm0/net: lago

    Args:
        info_dict (dict): information to reformat; if it has a truthy
            'Prefix' entry, only that subtree is rendered
        delimiter (str): a delimiter for the path components

    Returns:
        str: String representing the formatted info
    """
    # Local import: collections.Mapping was removed from the top-level
    # collections namespace in Python 3.10; collections.abc is correct.
    from collections import abc as _abc

    def dfs(node, path, acc):
        # Depth-first walk that accumulates the key path from the root.
        if isinstance(node, list):
            for child in node:
                dfs(child, path, acc)
        elif isinstance(node, _abc.Mapping):
            # Sort by key so the output order is deterministic.
            for child in sorted(node.items(), key=itemgetter(0)):
                dfs(child, path, acc)
        elif isinstance(node, tuple):
            # (key, subtree): extend a *copy* of the path so siblings
            # do not see each other's components.
            path = copy.copy(path)
            path.append(node[0])
            dfs(node[1], path, acc)
        else:
            # Leaf: join the last key with its value. Build the line
            # without mutating `path`, so sibling leaves (e.g. items of
            # a list) all render from the same, unmodified prefix.
            acc.append(delimiter.join(path[:-1] + ['{}: {}'.format(path[-1], str(node))]))

    result = []
    dfs(info_dict.get('Prefix') or info_dict, [], result)
    return '\n'.join(result)
constant[
This formatter will take a data structure that
represent a tree and will print all the paths
from the root to the leaves
in our case it will print each value and the keys
that needed to get to it, for example:
vm0:
net: lago
memory: 1024
will be output as:
vm0/net/lago
vm0/memory/1024
Args:
info_dict (dict): information to reformat
delimiter (str): a delimiter for the path components
Returns:
str: String representing the formatted info
]
def function[dfs, parameter[father, path, acc]]:
if call[name[isinstance], parameter[name[father], name[list]]] begin[:]
for taget[name[child]] in starred[name[father]] begin[:]
call[name[dfs], parameter[name[child], name[path], name[acc]]]
variable[result] assign[=] list[[]]
call[name[dfs], parameter[<ast.BoolOp object at 0x7da1b2346dd0>, list[[]], name[result]]]
return[call[constant[
].join, parameter[name[result]]]] | keyword[def] identifier[format] ( identifier[self] , identifier[info_dict] , identifier[delimiter] = literal[string] ):
literal[string]
keyword[def] identifier[dfs] ( identifier[father] , identifier[path] , identifier[acc] ):
keyword[if] identifier[isinstance] ( identifier[father] , identifier[list] ):
keyword[for] identifier[child] keyword[in] identifier[father] :
identifier[dfs] ( identifier[child] , identifier[path] , identifier[acc] )
keyword[elif] identifier[isinstance] ( identifier[father] , identifier[collections] . identifier[Mapping] ):
keyword[for] identifier[child] keyword[in] identifier[sorted] ( identifier[father] . identifier[items] (), identifier[key] = identifier[itemgetter] ( literal[int] )),:
identifier[dfs] ( identifier[child] , identifier[path] , identifier[acc] )
keyword[elif] identifier[isinstance] ( identifier[father] , identifier[tuple] ):
identifier[path] = identifier[copy] . identifier[copy] ( identifier[path] )
identifier[path] . identifier[append] ( identifier[father] [ literal[int] ])
identifier[dfs] ( identifier[father] [ literal[int] ], identifier[path] , identifier[acc] )
keyword[else] :
identifier[path] [- literal[int] ]= literal[string] . identifier[format] ( identifier[path] [- literal[int] ], identifier[str] ( identifier[father] ))
identifier[acc] . identifier[append] ( identifier[delimiter] . identifier[join] ( identifier[path] ))
identifier[result] =[]
identifier[dfs] ( identifier[info_dict] . identifier[get] ( literal[string] ) keyword[or] identifier[info_dict] ,[], identifier[result] )
keyword[return] literal[string] . identifier[join] ( identifier[result] ) | def format(self, info_dict, delimiter='/'):
"""
This formatter will take a data structure that
represent a tree and will print all the paths
from the root to the leaves
in our case it will print each value and the keys
that needed to get to it, for example:
vm0:
net: lago
memory: 1024
will be output as:
vm0/net/lago
vm0/memory/1024
Args:
info_dict (dict): information to reformat
delimiter (str): a delimiter for the path components
Returns:
str: String representing the formatted info
"""
def dfs(father, path, acc):
if isinstance(father, list):
for child in father:
dfs(child, path, acc) # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]]
elif isinstance(father, collections.Mapping):
for child in (sorted(father.items(), key=itemgetter(0)),):
dfs(child, path, acc) # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]]
elif isinstance(father, tuple):
path = copy.copy(path)
path.append(father[0])
dfs(father[1], path, acc) # depends on [control=['if'], data=[]]
else:
# join the last key with it's value
path[-1] = '{}: {}'.format(path[-1], str(father))
acc.append(delimiter.join(path))
result = []
dfs(info_dict.get('Prefix') or info_dict, [], result)
return '\n'.join(result) |
def new(self, *args, **kwargs):
    '''
    Create and return a new instance.

    Positional and keyword arguments are matched against this
    metaclass's attribute list. Non-referential attributes are assigned
    directly on the instance; referential attributes are collected and
    satisfied by relating the new instance across matching links.
    '''
    inst = self.clazz()
    self.storage.append(inst)
    # set all attributes with an initial default value
    referential_attributes = dict()
    for name, ty in self.attributes:
        if name not in self.referential_attributes:
            value = self.default_value(ty)
            setattr(inst, name, value)
    # set all positional arguments (matched in attribute declaration order)
    for attr, value in zip(self.attributes, args):
        name, ty = attr
        if name not in self.referential_attributes:
            setattr(inst, name, value)
        else:
            # deferred: referential values are resolved via links below
            referential_attributes[name] = value
    # set all named arguments
    for name, value in kwargs.items():
        if name not in self.referential_attributes:
            setattr(inst, name, value)
        else:
            referential_attributes[name] = value
    if not referential_attributes:
        return inst
    # batch relate referential attributes
    for link in self.links.values():
        # skip links requiring referential values the caller did not supply
        if set(link.key_map.values()) - set(referential_attributes.keys()):
            continue
        # NOTE(review): `kwargs` is rebound here, shadowing the method's
        # **kwargs parameter (no longer needed at this point).
        kwargs = dict()
        for key, value in link.key_map.items():
            kwargs[key] = referential_attributes[value]
        if not kwargs:
            continue
        # relate the new instance to every match on the other link end
        for other_inst in link.to_metaclass.query(kwargs):
            relate(other_inst, inst, link.rel_id, link.phrase)
    # sanity check: warn when a referential value did not propagate
    for name, value in referential_attributes.items():
        if getattr(inst, name) != value:
            logger.warning('unable to assign %s to %s', name, inst)
    return inst
constant[
Create and return a new instance.
]
variable[inst] assign[=] call[name[self].clazz, parameter[]]
call[name[self].storage.append, parameter[name[inst]]]
variable[referential_attributes] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da204621750>, <ast.Name object at 0x7da204622350>]]] in starred[name[self].attributes] begin[:]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].referential_attributes] begin[:]
variable[value] assign[=] call[name[self].default_value, parameter[name[ty]]]
call[name[setattr], parameter[name[inst], name[name], name[value]]]
for taget[tuple[[<ast.Name object at 0x7da204621480>, <ast.Name object at 0x7da2046215a0>]]] in starred[call[name[zip], parameter[name[self].attributes, name[args]]]] begin[:]
<ast.Tuple object at 0x7da2046232e0> assign[=] name[attr]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].referential_attributes] begin[:]
call[name[setattr], parameter[name[inst], name[name], name[value]]]
for taget[tuple[[<ast.Name object at 0x7da1b0291ed0>, <ast.Name object at 0x7da1b02938b0>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].referential_attributes] begin[:]
call[name[setattr], parameter[name[inst], name[name], name[value]]]
if <ast.UnaryOp object at 0x7da1b0291240> begin[:]
return[name[inst]]
for taget[name[link]] in starred[call[name[self].links.values, parameter[]]] begin[:]
if binary_operation[call[name[set], parameter[call[name[link].key_map.values, parameter[]]]] - call[name[set], parameter[call[name[referential_attributes].keys, parameter[]]]]] begin[:]
continue
variable[kwargs] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b02902e0>, <ast.Name object at 0x7da1b0293a60>]]] in starred[call[name[link].key_map.items, parameter[]]] begin[:]
call[name[kwargs]][name[key]] assign[=] call[name[referential_attributes]][name[value]]
if <ast.UnaryOp object at 0x7da1b0291030> begin[:]
continue
for taget[name[other_inst]] in starred[call[name[link].to_metaclass.query, parameter[name[kwargs]]]] begin[:]
call[name[relate], parameter[name[other_inst], name[inst], name[link].rel_id, name[link].phrase]]
for taget[tuple[[<ast.Name object at 0x7da20e963820>, <ast.Name object at 0x7da20e963dc0>]]] in starred[call[name[referential_attributes].items, parameter[]]] begin[:]
if compare[call[name[getattr], parameter[name[inst], name[name]]] not_equal[!=] name[value]] begin[:]
call[name[logger].warning, parameter[constant[unable to assign %s to %s], name[name], name[inst]]]
return[name[inst]] | keyword[def] identifier[new] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[inst] = identifier[self] . identifier[clazz] ()
identifier[self] . identifier[storage] . identifier[append] ( identifier[inst] )
identifier[referential_attributes] = identifier[dict] ()
keyword[for] identifier[name] , identifier[ty] keyword[in] identifier[self] . identifier[attributes] :
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[referential_attributes] :
identifier[value] = identifier[self] . identifier[default_value] ( identifier[ty] )
identifier[setattr] ( identifier[inst] , identifier[name] , identifier[value] )
keyword[for] identifier[attr] , identifier[value] keyword[in] identifier[zip] ( identifier[self] . identifier[attributes] , identifier[args] ):
identifier[name] , identifier[ty] = identifier[attr]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[referential_attributes] :
identifier[setattr] ( identifier[inst] , identifier[name] , identifier[value] )
keyword[else] :
identifier[referential_attributes] [ identifier[name] ]= identifier[value]
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[kwargs] . identifier[items] ():
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[referential_attributes] :
identifier[setattr] ( identifier[inst] , identifier[name] , identifier[value] )
keyword[else] :
identifier[referential_attributes] [ identifier[name] ]= identifier[value]
keyword[if] keyword[not] identifier[referential_attributes] :
keyword[return] identifier[inst]
keyword[for] identifier[link] keyword[in] identifier[self] . identifier[links] . identifier[values] ():
keyword[if] identifier[set] ( identifier[link] . identifier[key_map] . identifier[values] ())- identifier[set] ( identifier[referential_attributes] . identifier[keys] ()):
keyword[continue]
identifier[kwargs] = identifier[dict] ()
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[link] . identifier[key_map] . identifier[items] ():
identifier[kwargs] [ identifier[key] ]= identifier[referential_attributes] [ identifier[value] ]
keyword[if] keyword[not] identifier[kwargs] :
keyword[continue]
keyword[for] identifier[other_inst] keyword[in] identifier[link] . identifier[to_metaclass] . identifier[query] ( identifier[kwargs] ):
identifier[relate] ( identifier[other_inst] , identifier[inst] , identifier[link] . identifier[rel_id] , identifier[link] . identifier[phrase] )
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[referential_attributes] . identifier[items] ():
keyword[if] identifier[getattr] ( identifier[inst] , identifier[name] )!= identifier[value] :
identifier[logger] . identifier[warning] ( literal[string] , identifier[name] , identifier[inst] )
keyword[return] identifier[inst] | def new(self, *args, **kwargs):
"""
Create and return a new instance.
"""
inst = self.clazz()
self.storage.append(inst)
# set all attributes with an initial default value
referential_attributes = dict()
for (name, ty) in self.attributes:
if name not in self.referential_attributes:
value = self.default_value(ty)
setattr(inst, name, value) # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=[]]
# set all positional arguments
for (attr, value) in zip(self.attributes, args):
(name, ty) = attr
if name not in self.referential_attributes:
setattr(inst, name, value) # depends on [control=['if'], data=['name']]
else:
referential_attributes[name] = value # depends on [control=['for'], data=[]]
# set all named arguments
for (name, value) in kwargs.items():
if name not in self.referential_attributes:
setattr(inst, name, value) # depends on [control=['if'], data=['name']]
else:
referential_attributes[name] = value # depends on [control=['for'], data=[]]
if not referential_attributes:
return inst # depends on [control=['if'], data=[]] # batch relate referential attributes
for link in self.links.values():
if set(link.key_map.values()) - set(referential_attributes.keys()):
continue # depends on [control=['if'], data=[]]
kwargs = dict()
for (key, value) in link.key_map.items():
kwargs[key] = referential_attributes[value] # depends on [control=['for'], data=[]]
if not kwargs:
continue # depends on [control=['if'], data=[]]
for other_inst in link.to_metaclass.query(kwargs):
relate(other_inst, inst, link.rel_id, link.phrase) # depends on [control=['for'], data=['other_inst']] # depends on [control=['for'], data=['link']]
for (name, value) in referential_attributes.items():
if getattr(inst, name) != value:
logger.warning('unable to assign %s to %s', name, inst) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return inst |
def register_view(self, view):
    """Called when the View was registered.

    Wires the view's signal handlers and initializes the canvas. The
    handler ids for 'focus-changed' and 'drag-motion' are stored on the
    controller — presumably so the handlers can be blocked or
    disconnected elsewhere; not visible from this method.

    :param view: the graphical editor view that was registered
    """
    super(GraphicalEditorController, self).register_view(view)
    # React to changes of graphical meta data (positions, sizes, ...)
    self.view.connect('meta_data_changed', self._meta_data_changed)
    self.focus_changed_handler_id = self.view.editor.connect('focus-changed', self._move_focused_item_into_viewport)
    # Support dropping dragged data onto the editor canvas
    self.view.editor.connect("drag-data-received", self.on_drag_data_received)
    self.drag_motion_handler_id = self.view.editor.connect("drag-motion", self.on_drag_motion)
    self.setup_canvas()
constant[Called when the View was registered]
call[call[name[super], parameter[name[GraphicalEditorController], name[self]]].register_view, parameter[name[view]]]
call[name[self].view.connect, parameter[constant[meta_data_changed], name[self]._meta_data_changed]]
name[self].focus_changed_handler_id assign[=] call[name[self].view.editor.connect, parameter[constant[focus-changed], name[self]._move_focused_item_into_viewport]]
call[name[self].view.editor.connect, parameter[constant[drag-data-received], name[self].on_drag_data_received]]
name[self].drag_motion_handler_id assign[=] call[name[self].view.editor.connect, parameter[constant[drag-motion], name[self].on_drag_motion]]
call[name[self].setup_canvas, parameter[]] | keyword[def] identifier[register_view] ( identifier[self] , identifier[view] ):
literal[string]
identifier[super] ( identifier[GraphicalEditorController] , identifier[self] ). identifier[register_view] ( identifier[view] )
identifier[self] . identifier[view] . identifier[connect] ( literal[string] , identifier[self] . identifier[_meta_data_changed] )
identifier[self] . identifier[focus_changed_handler_id] = identifier[self] . identifier[view] . identifier[editor] . identifier[connect] ( literal[string] , identifier[self] . identifier[_move_focused_item_into_viewport] )
identifier[self] . identifier[view] . identifier[editor] . identifier[connect] ( literal[string] , identifier[self] . identifier[on_drag_data_received] )
identifier[self] . identifier[drag_motion_handler_id] = identifier[self] . identifier[view] . identifier[editor] . identifier[connect] ( literal[string] , identifier[self] . identifier[on_drag_motion] )
identifier[self] . identifier[setup_canvas] () | def register_view(self, view):
"""Called when the View was registered"""
super(GraphicalEditorController, self).register_view(view)
self.view.connect('meta_data_changed', self._meta_data_changed)
self.focus_changed_handler_id = self.view.editor.connect('focus-changed', self._move_focused_item_into_viewport)
self.view.editor.connect('drag-data-received', self.on_drag_data_received)
self.drag_motion_handler_id = self.view.editor.connect('drag-motion', self.on_drag_motion)
self.setup_canvas() |
def export(fn):
    """Mark *fn* as part of its defining module's public API.

    Appends the function's name to the ``__all__`` list of the module
    that defines it, creating the list if the module does not have one
    yet, and returns *fn* unchanged so this can be used as a decorator.

    See the following SO article for details:
    https://stackoverflow.com/a/35710527
    """
    module = sys.modules[fn.__module__]
    if not hasattr(module, '__all__'):
        module.__all__ = [fn.__name__]
    else:
        module.__all__.append(fn.__name__)
    return fn
constant[ Export decorator
Please refer to the following SO article for details: https://stackoverflow.com/a/35710527
]
variable[mod] assign[=] call[name[sys].modules][name[fn].__module__]
if call[name[hasattr], parameter[name[mod], constant[__all__]]] begin[:]
call[name[mod].__all__.append, parameter[name[fn].__name__]]
return[name[fn]] | keyword[def] identifier[export] ( identifier[fn] ):
literal[string]
identifier[mod] = identifier[sys] . identifier[modules] [ identifier[fn] . identifier[__module__] ]
keyword[if] identifier[hasattr] ( identifier[mod] , literal[string] ):
identifier[mod] . identifier[__all__] . identifier[append] ( identifier[fn] . identifier[__name__] )
keyword[else] :
identifier[mod] . identifier[__all__] =[ identifier[fn] . identifier[__name__] ]
keyword[return] identifier[fn] | def export(fn):
""" Export decorator
Please refer to the following SO article for details: https://stackoverflow.com/a/35710527
"""
mod = sys.modules[fn.__module__]
if hasattr(mod, '__all__'):
mod.__all__.append(fn.__name__) # depends on [control=['if'], data=[]]
else:
mod.__all__ = [fn.__name__]
return fn |
def PluRunOff_cowinners(self, profile):
    """
    Returns a list that associates all the winners of a profile under Plurality with Runoff rule.

    :ivar Profile profile: A Profile object that represents an election profile.
    :return: sorted list of every candidate that wins at least one possible runoff pairing.
    """
    # Currently, we expect the profile to contain complete ordering over candidates. Ties are
    # allowed however.
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc" and elecType != "csv":
        print("ERROR: unsupported election type")
        # NOTE(review): exit() terminates the whole interpreter on bad input;
        # raising an exception would be friendlier to callers.
        exit()
    # Initialization
    prefcounts = profile.getPreferenceCounts()
    len_prefcounts = len(prefcounts)
    rankmaps = profile.getRankMaps()
    ranking = MechanismPlurality().getRanking(profile)
    known_winners = set()
    # 1st round: find the top 2 candidates in plurality scores.
    # ranking[0][0] / ranking[0][1] appear to hold the (possibly tied) first- and
    # second-place candidate groups of the plurality ranking -- TODO confirm
    # against MechanismPlurality.getRanking's return format.
    top_2_combinations = []
    if len(ranking[0][0]) > 1:
        # Several candidates tied for first place: any pair drawn from the tie
        # could form the runoff.
        for cand1, cand2 in itertools.combinations(ranking[0][0], 2):
            top_2_combinations.append([cand1, cand2])
    else:
        max_cand = ranking[0][0][0]
        if len(ranking[0][1]) > 1:
            # Unique leader but tied runners-up: pair the leader with each of them.
            for second_max_cand in ranking[0][1]:
                top_2_combinations.append([max_cand, second_max_cand])
        else:
            second_max_cand = ranking[0][1][0]
            top_2_combinations.append([max_cand, second_max_cand])
    # 2nd round: find the candidate with maximum plurality score
    for top_2 in top_2_combinations:
        dict_top2 = {top_2[0]: 0, top_2[1]: 0}
        for i in range(len_prefcounts):
            # Restrict voter group i's rank map to the two runoff candidates.
            vote_top2 = {key: value for key, value in rankmaps[i].items() if key in top_2}
            top_position = min(vote_top2.values())
            keys = [x for x in vote_top2.keys() if vote_top2[x] == top_position]
            # If a voter ranks both runoff candidates equally, the full weight
            # prefcounts[i] is credited to each of them.
            for key in keys:
                dict_top2[key] += prefcounts[i]
        max_value = max(dict_top2.values())
        winners = [y for y in dict_top2.keys() if dict_top2[y] == max_value]
        # Accumulate every candidate that wins (or co-wins) some runoff pairing.
        known_winners = known_winners | set(winners)
    return sorted(known_winners)
constant[
Returns a list that associates all the winners of a profile under Plurality with Runoff rule.
:ivar Profile profile: A Profile object that represents an election profile.
]
variable[elecType] assign[=] call[name[profile].getElecType, parameter[]]
if <ast.BoolOp object at 0x7da18ede74c0> begin[:]
call[name[print], parameter[constant[ERROR: unsupported election type]]]
call[name[exit], parameter[]]
variable[prefcounts] assign[=] call[name[profile].getPreferenceCounts, parameter[]]
variable[len_prefcounts] assign[=] call[name[len], parameter[name[prefcounts]]]
variable[rankmaps] assign[=] call[name[profile].getRankMaps, parameter[]]
variable[ranking] assign[=] call[call[name[MechanismPlurality], parameter[]].getRanking, parameter[name[profile]]]
variable[known_winners] assign[=] call[name[set], parameter[]]
variable[top_2_combinations] assign[=] list[[]]
if compare[call[name[len], parameter[call[call[name[ranking]][constant[0]]][constant[0]]]] greater[>] constant[1]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18ede6ec0>, <ast.Name object at 0x7da18ede78e0>]]] in starred[call[name[itertools].combinations, parameter[call[call[name[ranking]][constant[0]]][constant[0]], constant[2]]]] begin[:]
call[name[top_2_combinations].append, parameter[list[[<ast.Name object at 0x7da18ede4f10>, <ast.Name object at 0x7da18ede6170>]]]]
for taget[name[top_2]] in starred[name[top_2_combinations]] begin[:]
variable[dict_top2] assign[=] dictionary[[<ast.Subscript object at 0x7da18ede6470>, <ast.Subscript object at 0x7da18ede4df0>], [<ast.Constant object at 0x7da18ede52a0>, <ast.Constant object at 0x7da18ede4940>]]
for taget[name[i]] in starred[call[name[range], parameter[name[len_prefcounts]]]] begin[:]
variable[vote_top2] assign[=] <ast.DictComp object at 0x7da18ede63b0>
variable[top_position] assign[=] call[name[min], parameter[call[name[vote_top2].values, parameter[]]]]
variable[keys] assign[=] <ast.ListComp object at 0x7da18ede4640>
for taget[name[key]] in starred[name[keys]] begin[:]
<ast.AugAssign object at 0x7da18ede4880>
variable[max_value] assign[=] call[name[max], parameter[call[name[dict_top2].values, parameter[]]]]
variable[winners] assign[=] <ast.ListComp object at 0x7da18ede68f0>
variable[known_winners] assign[=] binary_operation[name[known_winners] <ast.BitOr object at 0x7da2590d6aa0> call[name[set], parameter[name[winners]]]]
return[call[name[sorted], parameter[name[known_winners]]]] | keyword[def] identifier[PluRunOff_cowinners] ( identifier[self] , identifier[profile] ):
literal[string]
identifier[elecType] = identifier[profile] . identifier[getElecType] ()
keyword[if] identifier[elecType] != literal[string] keyword[and] identifier[elecType] != literal[string] keyword[and] identifier[elecType] != literal[string] :
identifier[print] ( literal[string] )
identifier[exit] ()
identifier[prefcounts] = identifier[profile] . identifier[getPreferenceCounts] ()
identifier[len_prefcounts] = identifier[len] ( identifier[prefcounts] )
identifier[rankmaps] = identifier[profile] . identifier[getRankMaps] ()
identifier[ranking] = identifier[MechanismPlurality] (). identifier[getRanking] ( identifier[profile] )
identifier[known_winners] = identifier[set] ()
identifier[top_2_combinations] =[]
keyword[if] identifier[len] ( identifier[ranking] [ literal[int] ][ literal[int] ])> literal[int] :
keyword[for] identifier[cand1] , identifier[cand2] keyword[in] identifier[itertools] . identifier[combinations] ( identifier[ranking] [ literal[int] ][ literal[int] ], literal[int] ):
identifier[top_2_combinations] . identifier[append] ([ identifier[cand1] , identifier[cand2] ])
keyword[else] :
identifier[max_cand] = identifier[ranking] [ literal[int] ][ literal[int] ][ literal[int] ]
keyword[if] identifier[len] ( identifier[ranking] [ literal[int] ][ literal[int] ])> literal[int] :
keyword[for] identifier[second_max_cand] keyword[in] identifier[ranking] [ literal[int] ][ literal[int] ]:
identifier[top_2_combinations] . identifier[append] ([ identifier[max_cand] , identifier[second_max_cand] ])
keyword[else] :
identifier[second_max_cand] = identifier[ranking] [ literal[int] ][ literal[int] ][ literal[int] ]
identifier[top_2_combinations] . identifier[append] ([ identifier[max_cand] , identifier[second_max_cand] ])
keyword[for] identifier[top_2] keyword[in] identifier[top_2_combinations] :
identifier[dict_top2] ={ identifier[top_2] [ literal[int] ]: literal[int] , identifier[top_2] [ literal[int] ]: literal[int] }
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len_prefcounts] ):
identifier[vote_top2] ={ identifier[key] : identifier[value] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[rankmaps] [ identifier[i] ]. identifier[items] () keyword[if] identifier[key] keyword[in] identifier[top_2] }
identifier[top_position] = identifier[min] ( identifier[vote_top2] . identifier[values] ())
identifier[keys] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[vote_top2] . identifier[keys] () keyword[if] identifier[vote_top2] [ identifier[x] ]== identifier[top_position] ]
keyword[for] identifier[key] keyword[in] identifier[keys] :
identifier[dict_top2] [ identifier[key] ]+= identifier[prefcounts] [ identifier[i] ]
identifier[max_value] = identifier[max] ( identifier[dict_top2] . identifier[values] ())
identifier[winners] =[ identifier[y] keyword[for] identifier[y] keyword[in] identifier[dict_top2] . identifier[keys] () keyword[if] identifier[dict_top2] [ identifier[y] ]== identifier[max_value] ]
identifier[known_winners] = identifier[known_winners] | identifier[set] ( identifier[winners] )
keyword[return] identifier[sorted] ( identifier[known_winners] ) | def PluRunOff_cowinners(self, profile):
"""
Returns a list that associates all the winners of a profile under Plurality with Runoff rule.
:ivar Profile profile: A Profile object that represents an election profile.
"""
# Currently, we expect the profile to contain complete ordering over candidates. Ties are
# allowed however.
elecType = profile.getElecType()
if elecType != 'soc' and elecType != 'toc' and (elecType != 'csv'):
print('ERROR: unsupported election type')
exit() # depends on [control=['if'], data=[]]
# Initialization
prefcounts = profile.getPreferenceCounts()
len_prefcounts = len(prefcounts)
rankmaps = profile.getRankMaps()
ranking = MechanismPlurality().getRanking(profile)
known_winners = set()
# 1st round: find the top 2 candidates in plurality scores
top_2_combinations = []
if len(ranking[0][0]) > 1:
for (cand1, cand2) in itertools.combinations(ranking[0][0], 2):
top_2_combinations.append([cand1, cand2]) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
max_cand = ranking[0][0][0]
if len(ranking[0][1]) > 1:
for second_max_cand in ranking[0][1]:
top_2_combinations.append([max_cand, second_max_cand]) # depends on [control=['for'], data=['second_max_cand']] # depends on [control=['if'], data=[]]
else:
second_max_cand = ranking[0][1][0]
top_2_combinations.append([max_cand, second_max_cand])
# 2nd round: find the candidate with maximum plurality score
for top_2 in top_2_combinations:
dict_top2 = {top_2[0]: 0, top_2[1]: 0}
for i in range(len_prefcounts):
vote_top2 = {key: value for (key, value) in rankmaps[i].items() if key in top_2}
top_position = min(vote_top2.values())
keys = [x for x in vote_top2.keys() if vote_top2[x] == top_position]
for key in keys:
dict_top2[key] += prefcounts[i] # depends on [control=['for'], data=['key']] # depends on [control=['for'], data=['i']]
max_value = max(dict_top2.values())
winners = [y for y in dict_top2.keys() if dict_top2[y] == max_value]
known_winners = known_winners | set(winners) # depends on [control=['for'], data=['top_2']]
return sorted(known_winners) |
def imsize(fname):
    """
    Return the size of the image file *fname* as (height, width) in pixels.

    :param fname: path to an image file readable by PIL
    :return: (height, width) tuple
    """
    from PIL import Image
    width, height = Image.open(fname).size
    return height, width
constant[
return image size (height, width)
:param fname:
:return:
]
from relative_module[PIL] import module[Image]
variable[im] assign[=] call[name[Image].open, parameter[name[fname]]]
return[tuple[[<ast.Subscript object at 0x7da2054a4a90>, <ast.Subscript object at 0x7da2054a51e0>]]] | keyword[def] identifier[imsize] ( identifier[fname] ):
literal[string]
keyword[from] identifier[PIL] keyword[import] identifier[Image]
identifier[im] = identifier[Image] . identifier[open] ( identifier[fname] )
keyword[return] identifier[im] . identifier[size] [ literal[int] ], identifier[im] . identifier[size] [ literal[int] ] | def imsize(fname):
"""
return image size (height, width)
:param fname:
:return:
"""
from PIL import Image
im = Image.open(fname)
return (im.size[1], im.size[0]) |
def get_qrcode(self, product_id=1):
    """
    Fetch the device id and QR code.

    Details: https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-4

    :param product_id: product number of the device
    :return: the JSON response packet
    """
    # The default product (id 1, as int or string) needs no explicit parameter.
    if product_id in (1, '1'):
        params = None
    else:
        params = {'product_id': product_id}
    return self._get('getqrcode', params=params)
constant[
获取deviceid和二维码
详情请参考
https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-4
:param product_id: 设备的产品编号
:return: 返回的 JSON 数据包
]
if <ast.BoolOp object at 0x7da204960ac0> begin[:]
variable[params] assign[=] constant[None]
return[call[name[self]._get, parameter[constant[getqrcode]]]] | keyword[def] identifier[get_qrcode] ( identifier[self] , identifier[product_id] = literal[int] ):
literal[string]
keyword[if] identifier[product_id] == literal[string] keyword[or] identifier[product_id] == literal[int] :
identifier[params] = keyword[None]
keyword[else] :
identifier[params] ={ literal[string] : identifier[product_id] }
keyword[return] identifier[self] . identifier[_get] ( literal[string] , identifier[params] = identifier[params] ) | def get_qrcode(self, product_id=1):
"""
获取deviceid和二维码
详情请参考
https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-4
:param product_id: 设备的产品编号
:return: 返回的 JSON 数据包
"""
if product_id == '1' or product_id == 1:
params = None # depends on [control=['if'], data=[]]
else:
params = {'product_id': product_id}
return self._get('getqrcode', params=params) |
def add_content(self, data):
    # type: (Optional[Union[Dict[str, Any], ET.Element]]) -> None
    """Attach a body to the request.

    :param data: Request body data, can be a json serializable
     object (e.g. dictionary) or a generator (e.g. file data).
    """
    if data is None:
        return
    if isinstance(data, ET.Element):
        serialized = ET.tostring(data, encoding="utf8")
        self.data = serialized
        self.headers['Content-Length'] = str(len(serialized))
        return
    # By default, assume JSON.
    try:
        serialized = json.dumps(data)
    except TypeError:
        # Not JSON serializable (e.g. a generator) -- pass through untouched,
        # without a Content-Length header.
        self.data = data
    else:
        self.data = serialized
        self.headers['Content-Length'] = str(len(serialized))
constant[Add a body to the request.
:param data: Request body data, can be a json serializable
object (e.g. dictionary) or a generator (e.g. file data).
]
if compare[name[data] is constant[None]] begin[:]
return[None]
if call[name[isinstance], parameter[name[data], name[ET].Element]] begin[:]
variable[bytes_data] assign[=] call[name[ET].tostring, parameter[name[data]]]
call[name[self].headers][constant[Content-Length]] assign[=] call[name[str], parameter[call[name[len], parameter[name[bytes_data]]]]]
name[self].data assign[=] name[bytes_data]
return[None]
<ast.Try object at 0x7da18c4cee60> | keyword[def] identifier[add_content] ( identifier[self] , identifier[data] ):
literal[string]
keyword[if] identifier[data] keyword[is] keyword[None] :
keyword[return]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[ET] . identifier[Element] ):
identifier[bytes_data] = identifier[ET] . identifier[tostring] ( identifier[data] , identifier[encoding] = literal[string] )
identifier[self] . identifier[headers] [ literal[string] ]= identifier[str] ( identifier[len] ( identifier[bytes_data] ))
identifier[self] . identifier[data] = identifier[bytes_data]
keyword[return]
keyword[try] :
identifier[self] . identifier[data] = identifier[json] . identifier[dumps] ( identifier[data] )
identifier[self] . identifier[headers] [ literal[string] ]= identifier[str] ( identifier[len] ( identifier[self] . identifier[data] ))
keyword[except] identifier[TypeError] :
identifier[self] . identifier[data] = identifier[data] | def add_content(self, data):
# type: (Optional[Union[Dict[str, Any], ET.Element]]) -> None
'Add a body to the request.\n\n :param data: Request body data, can be a json serializable\n object (e.g. dictionary) or a generator (e.g. file data).\n '
if data is None:
return # depends on [control=['if'], data=[]]
if isinstance(data, ET.Element):
bytes_data = ET.tostring(data, encoding='utf8')
self.headers['Content-Length'] = str(len(bytes_data))
self.data = bytes_data
return # depends on [control=['if'], data=[]]
# By default, assume JSON
try:
self.data = json.dumps(data)
self.headers['Content-Length'] = str(len(self.data)) # depends on [control=['try'], data=[]]
except TypeError:
self.data = data # depends on [control=['except'], data=[]] |
def send_message(self, message):
    """Send a given message to the remote host.

    The message is first echoed through ``print_debug_message`` so traffic
    can be traced when debugging is enabled, then written to this object's
    socket unmodified.

    :param message: payload passed straight through to ``socket.send``
    """
    self.print_debug_message(message)
    self.socket.send(message)
constant[Send a given message to the remote host.]
call[name[self].print_debug_message, parameter[name[message]]]
call[name[self].socket.send, parameter[name[message]]] | keyword[def] identifier[send_message] ( identifier[self] , identifier[message] ):
literal[string]
identifier[self] . identifier[print_debug_message] ( identifier[message] )
identifier[self] . identifier[socket] . identifier[send] ( identifier[message] ) | def send_message(self, message):
"""Send a given message to the remote host."""
self.print_debug_message(message)
self.socket.send(message) |
def join(self, timeout=None):
    """Blocking wait for the execution to finish.

    :param float timeout: Maximum time to wait or None for infinitely
    :return: True if the execution finished, False if no state machine was started or a timeout occurred
    :rtype: bool
    """
    if self.__wait_for_finishing_thread:
        if not timeout:
            # Join in short slices: signal handlers (e.g. KeyboardInterrupt)
            # are not serviced while the main thread blocks in join(None).
            while True:
                self.__wait_for_finishing_thread.join(0.5)
                # Fixed: Thread.isAlive() was removed in Python 3.9; use
                # is_alive() consistently (it was already used below).
                if not self.__wait_for_finishing_thread.is_alive():
                    break
        else:
            self.__wait_for_finishing_thread.join(timeout)
        return not self.__wait_for_finishing_thread.is_alive()
    else:
        logger.warning("Cannot join as state machine was not started yet.")
        return False
constant[Blocking wait for the execution to finish
:param float timeout: Maximum time to wait or None for infinitely
:return: True if the execution finished, False if no state machine was started or a timeout occurred
:rtype: bool
]
if name[self].__wait_for_finishing_thread begin[:]
if <ast.UnaryOp object at 0x7da2044c35b0> begin[:]
while constant[True] begin[:]
call[name[self].__wait_for_finishing_thread.join, parameter[constant[0.5]]]
if <ast.UnaryOp object at 0x7da2044c2740> begin[:]
break
return[<ast.UnaryOp object at 0x7da2044c1930>] | keyword[def] identifier[join] ( identifier[self] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[__wait_for_finishing_thread] :
keyword[if] keyword[not] identifier[timeout] :
keyword[while] keyword[True] :
identifier[self] . identifier[__wait_for_finishing_thread] . identifier[join] ( literal[int] )
keyword[if] keyword[not] identifier[self] . identifier[__wait_for_finishing_thread] . identifier[isAlive] ():
keyword[break]
keyword[else] :
identifier[self] . identifier[__wait_for_finishing_thread] . identifier[join] ( identifier[timeout] )
keyword[return] keyword[not] identifier[self] . identifier[__wait_for_finishing_thread] . identifier[is_alive] ()
keyword[else] :
identifier[logger] . identifier[warning] ( literal[string] )
keyword[return] keyword[False] | def join(self, timeout=None):
"""Blocking wait for the execution to finish
:param float timeout: Maximum time to wait or None for infinitely
:return: True if the execution finished, False if no state machine was started or a timeout occurred
:rtype: bool
"""
if self.__wait_for_finishing_thread:
if not timeout:
# signal handlers won't work if timeout is None and the thread is joined
while True:
self.__wait_for_finishing_thread.join(0.5)
if not self.__wait_for_finishing_thread.isAlive():
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
self.__wait_for_finishing_thread.join(timeout)
return not self.__wait_for_finishing_thread.is_alive() # depends on [control=['if'], data=[]]
else:
logger.warning('Cannot join as state machine was not started yet.')
return False |
def updateIncomeProcess(self):
    '''
    An alternative method for constructing the income process in the infinite horizon model.

    When self.cycles == 0 (infinite horizon), builds discretized permanent and
    transitory income shock distributions -- including an unemployment state --
    and stores them on the instance; otherwise defers to the lifecycle
    implementation in EstimationAgentClass.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    if self.cycles == 0:
        # Payroll tax rate that exactly finances unemployment benefits:
        # benefits paid out (IncUnemp * UnempPrb) divided by taxable labor
        # supplied by the employed ((1 - UnempPrb) * IndL).
        tax_rate = (self.IncUnemp*self.UnempPrb)/((1.0-self.UnempPrb)*self.IndL)
        # Discretized mean-one lognormal transitory shocks (no tail points).
        TranShkDstn = deepcopy(approxMeanOneLognormal(self.TranShkCount,sigma=self.TranShkStd[0],tail_N=0))
        # Prepend the unemployment outcome: probability UnempPrb with income
        # IncUnemp; employed outcomes are scaled by (1 - UnempPrb) in
        # probability and by after-tax labor (1 - tax_rate) * IndL in value.
        # NOTE(review): assumes TranShkDstn is [probabilities, values] --
        # confirm against approxMeanOneLognormal's return format.
        TranShkDstn[0] = np.insert(TranShkDstn[0]*(1.0-self.UnempPrb),0,self.UnempPrb)
        TranShkDstn[1] = np.insert(TranShkDstn[1]*(1.0-tax_rate)*self.IndL,0,self.IncUnemp)
        # Discretized mean-one lognormal permanent shocks.
        PermShkDstn = approxMeanOneLognormal(self.PermShkCount,sigma=self.PermShkStd[0],tail_N=0)
        # Joint (independent) permanent x transitory distribution; a singleton
        # list because the infinite-horizon model has a single recurring period.
        self.IncomeDstn = [combineIndepDstns(PermShkDstn,TranShkDstn)]
        self.TranShkDstn = TranShkDstn
        self.PermShkDstn = PermShkDstn
        self.addToTimeVary('IncomeDstn')
    else: # Do the usual method if this is the lifecycle model
        EstimationAgentClass.updateIncomeProcess(self)
constant[
An alternative method for constructing the income process in the infinite horizon model.
Parameters
----------
none
Returns
-------
none
]
if compare[name[self].cycles equal[==] constant[0]] begin[:]
variable[tax_rate] assign[=] binary_operation[binary_operation[name[self].IncUnemp * name[self].UnempPrb] / binary_operation[binary_operation[constant[1.0] - name[self].UnempPrb] * name[self].IndL]]
variable[TranShkDstn] assign[=] call[name[deepcopy], parameter[call[name[approxMeanOneLognormal], parameter[name[self].TranShkCount]]]]
call[name[TranShkDstn]][constant[0]] assign[=] call[name[np].insert, parameter[binary_operation[call[name[TranShkDstn]][constant[0]] * binary_operation[constant[1.0] - name[self].UnempPrb]], constant[0], name[self].UnempPrb]]
call[name[TranShkDstn]][constant[1]] assign[=] call[name[np].insert, parameter[binary_operation[binary_operation[call[name[TranShkDstn]][constant[1]] * binary_operation[constant[1.0] - name[tax_rate]]] * name[self].IndL], constant[0], name[self].IncUnemp]]
variable[PermShkDstn] assign[=] call[name[approxMeanOneLognormal], parameter[name[self].PermShkCount]]
name[self].IncomeDstn assign[=] list[[<ast.Call object at 0x7da18ede71f0>]]
name[self].TranShkDstn assign[=] name[TranShkDstn]
name[self].PermShkDstn assign[=] name[PermShkDstn]
call[name[self].addToTimeVary, parameter[constant[IncomeDstn]]] | keyword[def] identifier[updateIncomeProcess] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[cycles] == literal[int] :
identifier[tax_rate] =( identifier[self] . identifier[IncUnemp] * identifier[self] . identifier[UnempPrb] )/(( literal[int] - identifier[self] . identifier[UnempPrb] )* identifier[self] . identifier[IndL] )
identifier[TranShkDstn] = identifier[deepcopy] ( identifier[approxMeanOneLognormal] ( identifier[self] . identifier[TranShkCount] , identifier[sigma] = identifier[self] . identifier[TranShkStd] [ literal[int] ], identifier[tail_N] = literal[int] ))
identifier[TranShkDstn] [ literal[int] ]= identifier[np] . identifier[insert] ( identifier[TranShkDstn] [ literal[int] ]*( literal[int] - identifier[self] . identifier[UnempPrb] ), literal[int] , identifier[self] . identifier[UnempPrb] )
identifier[TranShkDstn] [ literal[int] ]= identifier[np] . identifier[insert] ( identifier[TranShkDstn] [ literal[int] ]*( literal[int] - identifier[tax_rate] )* identifier[self] . identifier[IndL] , literal[int] , identifier[self] . identifier[IncUnemp] )
identifier[PermShkDstn] = identifier[approxMeanOneLognormal] ( identifier[self] . identifier[PermShkCount] , identifier[sigma] = identifier[self] . identifier[PermShkStd] [ literal[int] ], identifier[tail_N] = literal[int] )
identifier[self] . identifier[IncomeDstn] =[ identifier[combineIndepDstns] ( identifier[PermShkDstn] , identifier[TranShkDstn] )]
identifier[self] . identifier[TranShkDstn] = identifier[TranShkDstn]
identifier[self] . identifier[PermShkDstn] = identifier[PermShkDstn]
identifier[self] . identifier[addToTimeVary] ( literal[string] )
keyword[else] :
identifier[EstimationAgentClass] . identifier[updateIncomeProcess] ( identifier[self] ) | def updateIncomeProcess(self):
"""
An alternative method for constructing the income process in the infinite horizon model.
Parameters
----------
none
Returns
-------
none
"""
if self.cycles == 0:
tax_rate = self.IncUnemp * self.UnempPrb / ((1.0 - self.UnempPrb) * self.IndL)
TranShkDstn = deepcopy(approxMeanOneLognormal(self.TranShkCount, sigma=self.TranShkStd[0], tail_N=0))
TranShkDstn[0] = np.insert(TranShkDstn[0] * (1.0 - self.UnempPrb), 0, self.UnempPrb)
TranShkDstn[1] = np.insert(TranShkDstn[1] * (1.0 - tax_rate) * self.IndL, 0, self.IncUnemp)
PermShkDstn = approxMeanOneLognormal(self.PermShkCount, sigma=self.PermShkStd[0], tail_N=0)
self.IncomeDstn = [combineIndepDstns(PermShkDstn, TranShkDstn)]
self.TranShkDstn = TranShkDstn
self.PermShkDstn = PermShkDstn
self.addToTimeVary('IncomeDstn') # depends on [control=['if'], data=[]]
else: # Do the usual method if this is the lifecycle model
EstimationAgentClass.updateIncomeProcess(self) |
def _calc_delta(self,ensemble,scaling_matrix=None):
    '''
    calc the scaled ensemble differences from the mean

    Parameters
    ----------
    ensemble : object with mean(), shape, and as_pyemu_matrix()
        the realizations (rows) to difference against their column mean
    scaling_matrix : pyemu Matrix, optional
        if given, the anomalies are left-multiplied by this matrix; note the
        anomaly matrix is transposed first (realizations become columns)
    Returns
    -------
    delta : pyemu Matrix
        mean-removed (and optionally scaled) anomalies, normalized by
        1/sqrt(N - 1) where N is the number of realizations
    '''
    mean = np.array(ensemble.mean(axis=0))
    delta = ensemble.as_pyemu_matrix()
    # Remove the ensemble mean from every realization (row).
    for i in range(ensemble.shape[0]):
        delta.x[i,:] -= mean
    if scaling_matrix is not None:
        # NOTE(review): this transposes the anomaly matrix before scaling --
        # presumably intentional for the ensemble-method algebra; confirm.
        delta = scaling_matrix * delta.T
    # Standard ensemble normalization factor 1/sqrt(N-1).
    delta *= (1.0 / np.sqrt(float(ensemble.shape[0] - 1.0)))
    return delta
constant[
calc the scaled ensemble differences from the mean
]
variable[mean] assign[=] call[name[np].array, parameter[call[name[ensemble].mean, parameter[]]]]
variable[delta] assign[=] call[name[ensemble].as_pyemu_matrix, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[ensemble].shape][constant[0]]]]] begin[:]
<ast.AugAssign object at 0x7da1b230a290>
if compare[name[scaling_matrix] is_not constant[None]] begin[:]
variable[delta] assign[=] binary_operation[name[scaling_matrix] * name[delta].T]
<ast.AugAssign object at 0x7da1b2309c60>
return[name[delta]] | keyword[def] identifier[_calc_delta] ( identifier[self] , identifier[ensemble] , identifier[scaling_matrix] = keyword[None] ):
literal[string]
identifier[mean] = identifier[np] . identifier[array] ( identifier[ensemble] . identifier[mean] ( identifier[axis] = literal[int] ))
identifier[delta] = identifier[ensemble] . identifier[as_pyemu_matrix] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[ensemble] . identifier[shape] [ literal[int] ]):
identifier[delta] . identifier[x] [ identifier[i] ,:]-= identifier[mean]
keyword[if] identifier[scaling_matrix] keyword[is] keyword[not] keyword[None] :
identifier[delta] = identifier[scaling_matrix] * identifier[delta] . identifier[T]
identifier[delta] *=( literal[int] / identifier[np] . identifier[sqrt] ( identifier[float] ( identifier[ensemble] . identifier[shape] [ literal[int] ]- literal[int] )))
keyword[return] identifier[delta] | def _calc_delta(self, ensemble, scaling_matrix=None):
"""
calc the scaled ensemble differences from the mean
"""
mean = np.array(ensemble.mean(axis=0))
delta = ensemble.as_pyemu_matrix()
for i in range(ensemble.shape[0]):
delta.x[i, :] -= mean # depends on [control=['for'], data=['i']]
if scaling_matrix is not None:
delta = scaling_matrix * delta.T # depends on [control=['if'], data=['scaling_matrix']]
delta *= 1.0 / np.sqrt(float(ensemble.shape[0] - 1.0))
return delta |
def dump(self):
    """Serialize note data to a plain dict.

    :return: dict with the keep version, serialized labels, and serialized
        nodes (each top-level node followed by its children).
    """
    # Collect nodes manually, as the Keep object isn't aware of new ListItems
    # until they've been synced to the server.
    collected = []
    for top_node in self.all():
        collected.append(top_node)
        collected.extend(top_node.children)
    return {
        'keep_version': self._keep_version,
        'labels': [lbl.save(False) for lbl in self.labels()],
        'nodes': [item.save(False) for item in collected],
    }
constant[Serialize note data.
Args:
state (dict): Serialized state to load.
]
variable[nodes] assign[=] list[[]]
for taget[name[node]] in starred[call[name[self].all, parameter[]]] begin[:]
call[name[nodes].append, parameter[name[node]]]
for taget[name[child]] in starred[name[node].children] begin[:]
call[name[nodes].append, parameter[name[child]]]
return[dictionary[[<ast.Constant object at 0x7da18ede6140>, <ast.Constant object at 0x7da18ede71f0>, <ast.Constant object at 0x7da18ede5630>], [<ast.Attribute object at 0x7da18ede5570>, <ast.ListComp object at 0x7da18ede7d00>, <ast.ListComp object at 0x7da18ede7970>]]] | keyword[def] identifier[dump] ( identifier[self] ):
literal[string]
identifier[nodes] =[]
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[all] ():
identifier[nodes] . identifier[append] ( identifier[node] )
keyword[for] identifier[child] keyword[in] identifier[node] . identifier[children] :
identifier[nodes] . identifier[append] ( identifier[child] )
keyword[return] {
literal[string] : identifier[self] . identifier[_keep_version] ,
literal[string] :[ identifier[label] . identifier[save] ( keyword[False] ) keyword[for] identifier[label] keyword[in] identifier[self] . identifier[labels] ()],
literal[string] :[ identifier[node] . identifier[save] ( keyword[False] ) keyword[for] identifier[node] keyword[in] identifier[nodes] ]
} | def dump(self):
"""Serialize note data.
Args:
state (dict): Serialized state to load.
"""
# Find all nodes manually, as the Keep object isn't aware of new ListItems
# until they've been synced to the server.
nodes = []
for node in self.all():
nodes.append(node)
for child in node.children:
nodes.append(child) # depends on [control=['for'], data=['child']] # depends on [control=['for'], data=['node']]
return {'keep_version': self._keep_version, 'labels': [label.save(False) for label in self.labels()], 'nodes': [node.save(False) for node in nodes]} |
def find_bidi(self, el):
    """Get directionality from element text.

    Recursively walks *el*'s children and returns the selector direction
    (``ct.SEL_DIR_LTR`` or ``ct.SEL_DIR_RTL``) implied by the first character
    found with a strong bidirectional class, or ``None`` if none is found.
    """
    for node in self.get_children(el, tags=False):
        # Analyze child text nodes
        if self.is_tag(node):
            # Avoid analyzing certain elements specified in the specification.
            # An explicit `dir` attribute also stops descent into this subtree.
            direction = DIR_MAP.get(util.lower(self.get_attribute_by_name(node, 'dir', '')), None)
            if (
                self.get_tag(node) in ('bdi', 'script', 'style', 'textarea', 'iframe') or
                not self.is_html_tag(node) or
                direction is not None
            ):
                continue  # pragma: no cover
            # Check directionality of this node's text (recursive descent).
            value = self.find_bidi(node)
            if value is not None:
                return value
            # Direction could not be determined
            continue  # pragma: no cover
        # Skip `doctype` comments, etc.
        if self.is_special_string(node):
            continue
        # Analyze text nodes for directionality: 'L' is strong left-to-right;
        # 'AL' (Arabic letter) and 'R' are strong right-to-left.
        for c in node:
            bidi = unicodedata.bidirectional(c)
            if bidi in ('AL', 'R', 'L'):
                return ct.SEL_DIR_LTR if bidi == 'L' else ct.SEL_DIR_RTL
    return None
constant[Get directionality from element text.]
for taget[name[node]] in starred[call[name[self].get_children, parameter[name[el]]]] begin[:]
if call[name[self].is_tag, parameter[name[node]]] begin[:]
variable[direction] assign[=] call[name[DIR_MAP].get, parameter[call[name[util].lower, parameter[call[name[self].get_attribute_by_name, parameter[name[node], constant[dir], constant[]]]]], constant[None]]]
if <ast.BoolOp object at 0x7da1b031caf0> begin[:]
continue
variable[value] assign[=] call[name[self].find_bidi, parameter[name[node]]]
if compare[name[value] is_not constant[None]] begin[:]
return[name[value]]
continue
if call[name[self].is_special_string, parameter[name[node]]] begin[:]
continue
for taget[name[c]] in starred[name[node]] begin[:]
variable[bidi] assign[=] call[name[unicodedata].bidirectional, parameter[name[c]]]
if compare[name[bidi] in tuple[[<ast.Constant object at 0x7da20cabdfc0>, <ast.Constant object at 0x7da20cabce20>, <ast.Constant object at 0x7da20cabd840>]]] begin[:]
return[<ast.IfExp object at 0x7da20cabd570>]
return[constant[None]] | keyword[def] identifier[find_bidi] ( identifier[self] , identifier[el] ):
literal[string]
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[get_children] ( identifier[el] , identifier[tags] = keyword[False] ):
keyword[if] identifier[self] . identifier[is_tag] ( identifier[node] ):
identifier[direction] = identifier[DIR_MAP] . identifier[get] ( identifier[util] . identifier[lower] ( identifier[self] . identifier[get_attribute_by_name] ( identifier[node] , literal[string] , literal[string] )), keyword[None] )
keyword[if] (
identifier[self] . identifier[get_tag] ( identifier[node] ) keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ) keyword[or]
keyword[not] identifier[self] . identifier[is_html_tag] ( identifier[node] ) keyword[or]
identifier[direction] keyword[is] keyword[not] keyword[None]
):
keyword[continue]
identifier[value] = identifier[self] . identifier[find_bidi] ( identifier[node] )
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[value]
keyword[continue]
keyword[if] identifier[self] . identifier[is_special_string] ( identifier[node] ):
keyword[continue]
keyword[for] identifier[c] keyword[in] identifier[node] :
identifier[bidi] = identifier[unicodedata] . identifier[bidirectional] ( identifier[c] )
keyword[if] identifier[bidi] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[return] identifier[ct] . identifier[SEL_DIR_LTR] keyword[if] identifier[bidi] == literal[string] keyword[else] identifier[ct] . identifier[SEL_DIR_RTL]
keyword[return] keyword[None] | def find_bidi(self, el):
"""Get directionality from element text."""
for node in self.get_children(el, tags=False):
# Analyze child text nodes
if self.is_tag(node):
# Avoid analyzing certain elements specified in the specification.
direction = DIR_MAP.get(util.lower(self.get_attribute_by_name(node, 'dir', '')), None)
if self.get_tag(node) in ('bdi', 'script', 'style', 'textarea', 'iframe') or not self.is_html_tag(node) or direction is not None:
continue # pragma: no cover # depends on [control=['if'], data=[]]
# Check directionality of this node's text
value = self.find_bidi(node)
if value is not None:
return value # depends on [control=['if'], data=['value']]
# Direction could not be determined
continue # pragma: no cover # depends on [control=['if'], data=[]]
# Skip `doctype` comments, etc.
if self.is_special_string(node):
continue # depends on [control=['if'], data=[]]
# Analyze text nodes for directionality.
for c in node:
bidi = unicodedata.bidirectional(c)
if bidi in ('AL', 'R', 'L'):
return ct.SEL_DIR_LTR if bidi == 'L' else ct.SEL_DIR_RTL # depends on [control=['if'], data=['bidi']] # depends on [control=['for'], data=['c']] # depends on [control=['for'], data=['node']]
return None |
def simxStart(connectionAddress, connectionPort, waitUntilConnected, doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # The underlying C remote-API layer expects a bytes address. On Python 3 a
    # plain str is therefore UTF-8 encoded first; on Python 2 (or for a bytes
    # argument) the value is passed through unchanged. The exact-type check
    # (`type(...) is str`) is kept deliberately to mirror the original contract.
    needs_encoding = (sys.version_info[0] == 3) and (type(connectionAddress) is str)
    if needs_encoding:
        connectionAddress = connectionAddress.encode('utf-8')
    return c_Start(connectionAddress, connectionPort, waitUntilConnected,
                   doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs)
constant[
Please have a look at the function description/documentation in the V-REP user manual
]
if <ast.BoolOp object at 0x7da1b133dfc0> begin[:]
variable[connectionAddress] assign[=] call[name[connectionAddress].encode, parameter[constant[utf-8]]]
return[call[name[c_Start], parameter[name[connectionAddress], name[connectionPort], name[waitUntilConnected], name[doNotReconnectOnceDisconnected], name[timeOutInMs], name[commThreadCycleInMs]]]] | keyword[def] identifier[simxStart] ( identifier[connectionAddress] , identifier[connectionPort] , identifier[waitUntilConnected] , identifier[doNotReconnectOnceDisconnected] , identifier[timeOutInMs] , identifier[commThreadCycleInMs] ):
literal[string]
keyword[if] ( identifier[sys] . identifier[version_info] [ literal[int] ]== literal[int] ) keyword[and] ( identifier[type] ( identifier[connectionAddress] ) keyword[is] identifier[str] ):
identifier[connectionAddress] = identifier[connectionAddress] . identifier[encode] ( literal[string] )
keyword[return] identifier[c_Start] ( identifier[connectionAddress] , identifier[connectionPort] , identifier[waitUntilConnected] , identifier[doNotReconnectOnceDisconnected] , identifier[timeOutInMs] , identifier[commThreadCycleInMs] ) | def simxStart(connectionAddress, connectionPort, waitUntilConnected, doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs):
"""
Please have a look at the function description/documentation in the V-REP user manual
"""
if sys.version_info[0] == 3 and type(connectionAddress) is str:
connectionAddress = connectionAddress.encode('utf-8') # depends on [control=['if'], data=[]]
return c_Start(connectionAddress, connectionPort, waitUntilConnected, doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs) |
def deduplication_backup(self, process_bar):
    """
    Backup the current file and deduplicate it against earlier backups.

    Streams the source file into the destination while hashing it, writes the
    hash digest to a sidecar file, then hard-links against an existing backup
    entry with the same content hash (if any) and records a BackupEntry row.

    Side effects set on self:
      * self.fast_backup  -- always False here (this is the slow, hashing path)
      * self.file_linked  -- True if the file was deduplicated via hardlink

    :param process_bar: tqdm process bar, forwarded to the chunked copy helper
    :raises BackupFileError: if the destination dir cannot be created or the
        source/destination I/O fails
    :raises KeyboardInterrupt: re-raised after removing partially written files
    """
    self.fast_backup = False  # Was a fast backup used?
    src_path = self.dir_path.resolved_path
    log.debug("*** deduplication backup: '%s'", src_path)
    log.debug("abs_src_filepath: '%s'", self.path_helper.abs_src_filepath)
    log.debug("abs_dst_filepath: '%s'", self.path_helper.abs_dst_filepath)
    log.debug("abs_dst_hash_filepath: '%s'", self.path_helper.abs_dst_hash_filepath)
    log.debug("abs_dst_dir: '%s'", self.path_helper.abs_dst_path)
    if not self.path_helper.abs_dst_path.is_dir():
        try:
            self.path_helper.abs_dst_path.makedirs(mode=phlb_config.default_new_path_mode)
        except OSError as err:
            raise BackupFileError("Error creating out path: %s" % err)
    else:
        # Destination dir already exists: the target file must not, otherwise
        # we would silently overwrite a previous backup.
        assert not self.path_helper.abs_dst_filepath.is_file(), (
            "Out file already exists: %r" % self.path_helper.abs_src_filepath
        )
    try:
        try:
            # Copy + hash in one pass: the inner helper reads from in_file,
            # writes to out_file and returns the running hash object. The
            # digest is written to the sidecar hash file after out_file closes.
            with self.path_helper.abs_src_filepath.open("rb") as in_file:
                with self.path_helper.abs_dst_hash_filepath.open("w") as hash_file:
                    with self.path_helper.abs_dst_filepath.open("wb") as out_file:
                        hash = self._deduplication_backup(self.dir_path, in_file, out_file, process_bar)
                    hash_hexdigest = hash.hexdigest()
                    hash_file.write(hash_hexdigest)
        except OSError as err:
            # FIXME: Better error message
            raise BackupFileError("Skip file %s error: %s" % (self.path_helper.abs_src_filepath, err))
    except KeyboardInterrupt:
        # Try to remove created files, so no half-written backup data is left
        # behind; unlink errors are ignored (file may not exist yet).
        try:
            self.path_helper.abs_dst_filepath.unlink()
        except OSError:
            pass
        try:
            self.path_helper.abs_dst_hash_filepath.unlink()
        except OSError:
            pass
        raise KeyboardInterrupt
    # deduplicate() replaces the just-written file with a hardlink when an
    # older backup with the same content hash exists; returns that old entry.
    old_backup_entry = deduplicate(self.path_helper.abs_dst_filepath, hash_hexdigest)
    if old_backup_entry is None:
        log.debug("File is unique.")
        self.file_linked = False  # Was a hardlink used?
    else:
        log.debug("File was deduplicated via hardlink to: %s" % old_backup_entry)
        self.file_linked = True  # Was a hardlink used?
    # set origin access/modified times to the new created backup file
    atime_ns = self.dir_path.stat.st_atime_ns
    mtime_ns = self.dir_path.stat.st_mtime_ns
    self.path_helper.abs_dst_filepath.utime(ns=(atime_ns, mtime_ns))  # call os.utime()
    log.debug("Set mtime to: %s" % mtime_ns)
    # Record this backup in the database so future runs can deduplicate
    # against it (and so fast backups can compare mtime/size).
    BackupEntry.objects.create(
        backup_run=self.backup_run,
        backup_entry_path=self.path_helper.abs_dst_filepath,
        hash_hexdigest=hash_hexdigest,
    )
    self.fast_backup = False
constant[
Backup the current file and compare the content.
:param process_bar: tqdm process bar
]
name[self].fast_backup assign[=] constant[False]
variable[src_path] assign[=] name[self].dir_path.resolved_path
call[name[log].debug, parameter[constant[*** deduplication backup: '%s'], name[src_path]]]
call[name[log].debug, parameter[constant[abs_src_filepath: '%s'], name[self].path_helper.abs_src_filepath]]
call[name[log].debug, parameter[constant[abs_dst_filepath: '%s'], name[self].path_helper.abs_dst_filepath]]
call[name[log].debug, parameter[constant[abs_dst_hash_filepath: '%s'], name[self].path_helper.abs_dst_hash_filepath]]
call[name[log].debug, parameter[constant[abs_dst_dir: '%s'], name[self].path_helper.abs_dst_path]]
if <ast.UnaryOp object at 0x7da18f09fca0> begin[:]
<ast.Try object at 0x7da18f09cb50>
<ast.Try object at 0x7da18f09e920>
variable[old_backup_entry] assign[=] call[name[deduplicate], parameter[name[self].path_helper.abs_dst_filepath, name[hash_hexdigest]]]
if compare[name[old_backup_entry] is constant[None]] begin[:]
call[name[log].debug, parameter[constant[File is unique.]]]
name[self].file_linked assign[=] constant[False]
variable[atime_ns] assign[=] name[self].dir_path.stat.st_atime_ns
variable[mtime_ns] assign[=] name[self].dir_path.stat.st_mtime_ns
call[name[self].path_helper.abs_dst_filepath.utime, parameter[]]
call[name[log].debug, parameter[binary_operation[constant[Set mtime to: %s] <ast.Mod object at 0x7da2590d6920> name[mtime_ns]]]]
call[name[BackupEntry].objects.create, parameter[]]
name[self].fast_backup assign[=] constant[False] | keyword[def] identifier[deduplication_backup] ( identifier[self] , identifier[process_bar] ):
literal[string]
identifier[self] . identifier[fast_backup] = keyword[False]
identifier[src_path] = identifier[self] . identifier[dir_path] . identifier[resolved_path]
identifier[log] . identifier[debug] ( literal[string] , identifier[src_path] )
identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[path_helper] . identifier[abs_src_filepath] )
identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[path_helper] . identifier[abs_dst_filepath] )
identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[path_helper] . identifier[abs_dst_hash_filepath] )
identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[path_helper] . identifier[abs_dst_path] )
keyword[if] keyword[not] identifier[self] . identifier[path_helper] . identifier[abs_dst_path] . identifier[is_dir] ():
keyword[try] :
identifier[self] . identifier[path_helper] . identifier[abs_dst_path] . identifier[makedirs] ( identifier[mode] = identifier[phlb_config] . identifier[default_new_path_mode] )
keyword[except] identifier[OSError] keyword[as] identifier[err] :
keyword[raise] identifier[BackupFileError] ( literal[string] % identifier[err] )
keyword[else] :
keyword[assert] keyword[not] identifier[self] . identifier[path_helper] . identifier[abs_dst_filepath] . identifier[is_file] (),(
literal[string] % identifier[self] . identifier[path_helper] . identifier[abs_src_filepath]
)
keyword[try] :
keyword[try] :
keyword[with] identifier[self] . identifier[path_helper] . identifier[abs_src_filepath] . identifier[open] ( literal[string] ) keyword[as] identifier[in_file] :
keyword[with] identifier[self] . identifier[path_helper] . identifier[abs_dst_hash_filepath] . identifier[open] ( literal[string] ) keyword[as] identifier[hash_file] :
keyword[with] identifier[self] . identifier[path_helper] . identifier[abs_dst_filepath] . identifier[open] ( literal[string] ) keyword[as] identifier[out_file] :
identifier[hash] = identifier[self] . identifier[_deduplication_backup] ( identifier[self] . identifier[dir_path] , identifier[in_file] , identifier[out_file] , identifier[process_bar] )
identifier[hash_hexdigest] = identifier[hash] . identifier[hexdigest] ()
identifier[hash_file] . identifier[write] ( identifier[hash_hexdigest] )
keyword[except] identifier[OSError] keyword[as] identifier[err] :
keyword[raise] identifier[BackupFileError] ( literal[string] %( identifier[self] . identifier[path_helper] . identifier[abs_src_filepath] , identifier[err] ))
keyword[except] identifier[KeyboardInterrupt] :
keyword[try] :
identifier[self] . identifier[path_helper] . identifier[abs_dst_filepath] . identifier[unlink] ()
keyword[except] identifier[OSError] :
keyword[pass]
keyword[try] :
identifier[self] . identifier[path_helper] . identifier[abs_dst_hash_filepath] . identifier[unlink] ()
keyword[except] identifier[OSError] :
keyword[pass]
keyword[raise] identifier[KeyboardInterrupt]
identifier[old_backup_entry] = identifier[deduplicate] ( identifier[self] . identifier[path_helper] . identifier[abs_dst_filepath] , identifier[hash_hexdigest] )
keyword[if] identifier[old_backup_entry] keyword[is] keyword[None] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[self] . identifier[file_linked] = keyword[False]
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] % identifier[old_backup_entry] )
identifier[self] . identifier[file_linked] = keyword[True]
identifier[atime_ns] = identifier[self] . identifier[dir_path] . identifier[stat] . identifier[st_atime_ns]
identifier[mtime_ns] = identifier[self] . identifier[dir_path] . identifier[stat] . identifier[st_mtime_ns]
identifier[self] . identifier[path_helper] . identifier[abs_dst_filepath] . identifier[utime] ( identifier[ns] =( identifier[atime_ns] , identifier[mtime_ns] ))
identifier[log] . identifier[debug] ( literal[string] % identifier[mtime_ns] )
identifier[BackupEntry] . identifier[objects] . identifier[create] (
identifier[backup_run] = identifier[self] . identifier[backup_run] ,
identifier[backup_entry_path] = identifier[self] . identifier[path_helper] . identifier[abs_dst_filepath] ,
identifier[hash_hexdigest] = identifier[hash_hexdigest] ,
)
identifier[self] . identifier[fast_backup] = keyword[False] | def deduplication_backup(self, process_bar):
"""
Backup the current file and compare the content.
:param process_bar: tqdm process bar
"""
self.fast_backup = False # Was a fast backup used?
src_path = self.dir_path.resolved_path
log.debug("*** deduplication backup: '%s'", src_path)
log.debug("abs_src_filepath: '%s'", self.path_helper.abs_src_filepath)
log.debug("abs_dst_filepath: '%s'", self.path_helper.abs_dst_filepath)
log.debug("abs_dst_hash_filepath: '%s'", self.path_helper.abs_dst_hash_filepath)
log.debug("abs_dst_dir: '%s'", self.path_helper.abs_dst_path)
if not self.path_helper.abs_dst_path.is_dir():
try:
self.path_helper.abs_dst_path.makedirs(mode=phlb_config.default_new_path_mode) # depends on [control=['try'], data=[]]
except OSError as err:
raise BackupFileError('Error creating out path: %s' % err) # depends on [control=['except'], data=['err']] # depends on [control=['if'], data=[]]
else:
assert not self.path_helper.abs_dst_filepath.is_file(), 'Out file already exists: %r' % self.path_helper.abs_src_filepath
try:
try:
with self.path_helper.abs_src_filepath.open('rb') as in_file:
with self.path_helper.abs_dst_hash_filepath.open('w') as hash_file:
with self.path_helper.abs_dst_filepath.open('wb') as out_file:
hash = self._deduplication_backup(self.dir_path, in_file, out_file, process_bar) # depends on [control=['with'], data=['out_file']]
hash_hexdigest = hash.hexdigest()
hash_file.write(hash_hexdigest) # depends on [control=['with'], data=['hash_file']] # depends on [control=['with'], data=['in_file']] # depends on [control=['try'], data=[]]
except OSError as err:
# FIXME: Better error message
raise BackupFileError('Skip file %s error: %s' % (self.path_helper.abs_src_filepath, err)) # depends on [control=['except'], data=['err']] # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
# Try to remove created files
try:
self.path_helper.abs_dst_filepath.unlink() # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
try:
self.path_helper.abs_dst_hash_filepath.unlink() # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
raise KeyboardInterrupt # depends on [control=['except'], data=[]]
old_backup_entry = deduplicate(self.path_helper.abs_dst_filepath, hash_hexdigest)
if old_backup_entry is None:
log.debug('File is unique.')
self.file_linked = False # Was a hardlink used? # depends on [control=['if'], data=[]]
else:
log.debug('File was deduplicated via hardlink to: %s' % old_backup_entry)
self.file_linked = True # Was a hardlink used?
# set origin access/modified times to the new created backup file
atime_ns = self.dir_path.stat.st_atime_ns
mtime_ns = self.dir_path.stat.st_mtime_ns
self.path_helper.abs_dst_filepath.utime(ns=(atime_ns, mtime_ns)) # call os.utime()
log.debug('Set mtime to: %s' % mtime_ns)
BackupEntry.objects.create(backup_run=self.backup_run, backup_entry_path=self.path_helper.abs_dst_filepath, hash_hexdigest=hash_hexdigest)
self.fast_backup = False |
def squash_sequence(input_layer):
  """Squashes a sequence into a single Tensor with dim 1 being time*batch.

  A sequence is an array of Tensors, which is not appropriate for most
  operations; this concatenates them into a single Tensor along dim 0.
  Defaults are assigned such that cleave_sequence requires no args.

  Args:
    input_layer: The input layer holding a non-empty `.sequence`.
  Returns:
    A PrettyTensor containing a single tensor with the first dim containing
    both time and batch.
  Raises:
    ValueError: If the sequence is empty.
  """
  n_steps = len(input_layer.sequence)
  if n_steps == 0:
    raise ValueError('Empty tensor sequence.')
  # A single timestep needs no concat; otherwise stack time along dim 0.
  if n_steps == 1:
    squashed = input_layer.sequence[0]
  else:
    squashed = tf.concat(input_layer.sequence, 0)
  return input_layer.with_tensor(squashed).with_defaults(unroll=n_steps)
constant["Squashes a sequence into a single Tensor with dim 1 being time*batch.
A sequence is an array of Tensors, which is not appropriate for most
operations, this squashes them together into Tensor.
Defaults are assigned such that cleave_sequence requires no args.
Args:
input_layer: The input layer.
Returns:
A PrettyTensor containing a single tensor with the first dim containing
both time and batch.
Raises:
ValueError: If the sequence is empty.
]
variable[timesteps] assign[=] call[name[len], parameter[name[input_layer].sequence]]
if <ast.UnaryOp object at 0x7da18ede6350> begin[:]
<ast.Raise object at 0x7da18ede7820>
return[call[call[name[input_layer].with_tensor, parameter[name[result]]].with_defaults, parameter[]]] | keyword[def] identifier[squash_sequence] ( identifier[input_layer] ):
literal[string]
identifier[timesteps] = identifier[len] ( identifier[input_layer] . identifier[sequence] )
keyword[if] keyword[not] identifier[timesteps] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[elif] identifier[timesteps] == literal[int] :
identifier[result] = identifier[input_layer] . identifier[sequence] [ literal[int] ]
keyword[else] :
identifier[result] = identifier[tf] . identifier[concat] ( identifier[input_layer] . identifier[sequence] , literal[int] )
keyword[return] identifier[input_layer] . identifier[with_tensor] ( identifier[result] ). identifier[with_defaults] ( identifier[unroll] = identifier[timesteps] ) | def squash_sequence(input_layer):
""""Squashes a sequence into a single Tensor with dim 1 being time*batch.
A sequence is an array of Tensors, which is not appropriate for most
operations, this squashes them together into Tensor.
Defaults are assigned such that cleave_sequence requires no args.
Args:
input_layer: The input layer.
Returns:
A PrettyTensor containing a single tensor with the first dim containing
both time and batch.
Raises:
ValueError: If the sequence is empty.
"""
timesteps = len(input_layer.sequence)
if not timesteps:
raise ValueError('Empty tensor sequence.') # depends on [control=['if'], data=[]]
elif timesteps == 1:
result = input_layer.sequence[0] # depends on [control=['if'], data=[]]
else:
result = tf.concat(input_layer.sequence, 0)
return input_layer.with_tensor(result).with_defaults(unroll=timesteps) |
def _StartDebugger():
  """Configures and starts the debugger.

  Wires together the native debugger module, the hub client used to talk to
  the cloud backend, and the breakpoints manager, then starts the client.
  Populates the module globals _hub_client and _breakpoints_manager.
  """
  global _hub_client
  global _breakpoints_manager
  # Initialize the native (C extension) debugger with the parsed flags first;
  # everything below depends on it.
  cdbg_native.InitializeModule(_flags)
  _hub_client = gcp_hub_client.GcpHubClient()
  visibility_policy = _GetVisibilityPolicy()
  _breakpoints_manager = breakpoints_manager.BreakpointsManager(
      _hub_client,
      visibility_policy)
  # Set up loggers for logpoints.
  capture_collector.SetLogger(logging.getLogger())
  capture_collector.CaptureCollector.pretty_printers.append(
      appengine_pretty_printers.PrettyPrinter)
  # Route hub callbacks to the breakpoints manager: breakpoint updates and
  # periodic idle ticks (used to expire stale breakpoints).
  _hub_client.on_active_breakpoints_changed = (
      _breakpoints_manager.SetActiveBreakpoints)
  _hub_client.on_idle = _breakpoints_manager.CheckBreakpointsExpiration
  _hub_client.SetupAuth(
      _flags.get('project_id'),
      _flags.get('project_number'),
      _flags.get('service_account_json_file'))
  _hub_client.InitializeDebuggeeLabels(_flags)
  # Start() kicks off the hub client's background operation.
  _hub_client.Start()
constant[Configures and starts the debugger.]
<ast.Global object at 0x7da20c6aa650>
<ast.Global object at 0x7da20c6ab3a0>
call[name[cdbg_native].InitializeModule, parameter[name[_flags]]]
variable[_hub_client] assign[=] call[name[gcp_hub_client].GcpHubClient, parameter[]]
variable[visibility_policy] assign[=] call[name[_GetVisibilityPolicy], parameter[]]
variable[_breakpoints_manager] assign[=] call[name[breakpoints_manager].BreakpointsManager, parameter[name[_hub_client], name[visibility_policy]]]
call[name[capture_collector].SetLogger, parameter[call[name[logging].getLogger, parameter[]]]]
call[name[capture_collector].CaptureCollector.pretty_printers.append, parameter[name[appengine_pretty_printers].PrettyPrinter]]
name[_hub_client].on_active_breakpoints_changed assign[=] name[_breakpoints_manager].SetActiveBreakpoints
name[_hub_client].on_idle assign[=] name[_breakpoints_manager].CheckBreakpointsExpiration
call[name[_hub_client].SetupAuth, parameter[call[name[_flags].get, parameter[constant[project_id]]], call[name[_flags].get, parameter[constant[project_number]]], call[name[_flags].get, parameter[constant[service_account_json_file]]]]]
call[name[_hub_client].InitializeDebuggeeLabels, parameter[name[_flags]]]
call[name[_hub_client].Start, parameter[]] | keyword[def] identifier[_StartDebugger] ():
literal[string]
keyword[global] identifier[_hub_client]
keyword[global] identifier[_breakpoints_manager]
identifier[cdbg_native] . identifier[InitializeModule] ( identifier[_flags] )
identifier[_hub_client] = identifier[gcp_hub_client] . identifier[GcpHubClient] ()
identifier[visibility_policy] = identifier[_GetVisibilityPolicy] ()
identifier[_breakpoints_manager] = identifier[breakpoints_manager] . identifier[BreakpointsManager] (
identifier[_hub_client] ,
identifier[visibility_policy] )
identifier[capture_collector] . identifier[SetLogger] ( identifier[logging] . identifier[getLogger] ())
identifier[capture_collector] . identifier[CaptureCollector] . identifier[pretty_printers] . identifier[append] (
identifier[appengine_pretty_printers] . identifier[PrettyPrinter] )
identifier[_hub_client] . identifier[on_active_breakpoints_changed] =(
identifier[_breakpoints_manager] . identifier[SetActiveBreakpoints] )
identifier[_hub_client] . identifier[on_idle] = identifier[_breakpoints_manager] . identifier[CheckBreakpointsExpiration]
identifier[_hub_client] . identifier[SetupAuth] (
identifier[_flags] . identifier[get] ( literal[string] ),
identifier[_flags] . identifier[get] ( literal[string] ),
identifier[_flags] . identifier[get] ( literal[string] ))
identifier[_hub_client] . identifier[InitializeDebuggeeLabels] ( identifier[_flags] )
identifier[_hub_client] . identifier[Start] () | def _StartDebugger():
"""Configures and starts the debugger."""
global _hub_client
global _breakpoints_manager
cdbg_native.InitializeModule(_flags)
_hub_client = gcp_hub_client.GcpHubClient()
visibility_policy = _GetVisibilityPolicy()
_breakpoints_manager = breakpoints_manager.BreakpointsManager(_hub_client, visibility_policy)
# Set up loggers for logpoints.
capture_collector.SetLogger(logging.getLogger())
capture_collector.CaptureCollector.pretty_printers.append(appengine_pretty_printers.PrettyPrinter)
_hub_client.on_active_breakpoints_changed = _breakpoints_manager.SetActiveBreakpoints
_hub_client.on_idle = _breakpoints_manager.CheckBreakpointsExpiration
_hub_client.SetupAuth(_flags.get('project_id'), _flags.get('project_number'), _flags.get('service_account_json_file'))
_hub_client.InitializeDebuggeeLabels(_flags)
_hub_client.Start() |
def list_cover(list1, list2):
    r"""
    returns boolean for each position in list1 if it is in list2
    Args:
        list1 (list):
        list2 (list):
    Returns:
        list: incover_list - true where list1 intersects list2
    CommandLine:
        python -m utool.util_list --test-list_cover
    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> # build test data
        >>> list1 = [1, 2, 3, 4, 5, 6]
        >>> list2 = [2, 3, 6]
        >>> # execute function
        >>> incover_list = list_cover(list1, list2)
        >>> # verify results
        >>> result = str(incover_list)
        >>> print(result)
        [False, True, True, False, False, True]
    """
    # Build the lookup set once so each membership test is O(1).
    lookup = set(list2)
    return [element in lookup for element in list1]
constant[
returns boolean for each position in list1 if it is in list2
Args:
list1 (list):
list2 (list):
Returns:
list: incover_list - true where list1 intersects list2
CommandLine:
python -m utool.util_list --test-list_cover
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> # build test data
>>> list1 = [1, 2, 3, 4, 5, 6]
>>> list2 = [2, 3, 6]
>>> # execute function
>>> incover_list = list_cover(list1, list2)
>>> # verify results
>>> result = str(incover_list)
>>> print(result)
[False, True, True, False, False, True]
]
variable[set2] assign[=] call[name[set], parameter[name[list2]]]
variable[incover_list] assign[=] <ast.ListComp object at 0x7da1b24e8d00>
return[name[incover_list]] | keyword[def] identifier[list_cover] ( identifier[list1] , identifier[list2] ):
literal[string]
identifier[set2] = identifier[set] ( identifier[list2] )
identifier[incover_list] =[ identifier[item1] keyword[in] identifier[set2] keyword[for] identifier[item1] keyword[in] identifier[list1] ]
keyword[return] identifier[incover_list] | def list_cover(list1, list2):
"""
returns boolean for each position in list1 if it is in list2
Args:
list1 (list):
list2 (list):
Returns:
list: incover_list - true where list1 intersects list2
CommandLine:
python -m utool.util_list --test-list_cover
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> # build test data
>>> list1 = [1, 2, 3, 4, 5, 6]
>>> list2 = [2, 3, 6]
>>> # execute function
>>> incover_list = list_cover(list1, list2)
>>> # verify results
>>> result = str(incover_list)
>>> print(result)
[False, True, True, False, False, True]
"""
set2 = set(list2)
incover_list = [item1 in set2 for item1 in list1]
return incover_list |
def each(iterable=None, *, name=None, metric=call_default):
    """Measure time elapsed to produce each item of an iterable.

    Can be used either directly on an iterable or, when called without an
    iterable, as a decorator factory.

    :arg iterable: any iterable
    :arg function metric: f(name, 1, time)
    :arg str name: name for the metric
    """
    # No iterable given -> act as a decorator factory.
    if iterable is None:
        return _each_decorator(name, metric)
    return _do_each(iterable, name, metric)
constant[Measure time elapsed to produce each item of an iterable
:arg iterable: any iterable
:arg function metric: f(name, 1, time)
:arg str name: name for the metric
]
if compare[name[iterable] is constant[None]] begin[:]
return[call[name[_each_decorator], parameter[name[name], name[metric]]]] | keyword[def] identifier[each] ( identifier[iterable] = keyword[None] ,*, identifier[name] = keyword[None] , identifier[metric] = identifier[call_default] ):
literal[string]
keyword[if] identifier[iterable] keyword[is] keyword[None] :
keyword[return] identifier[_each_decorator] ( identifier[name] , identifier[metric] )
keyword[else] :
keyword[return] identifier[_do_each] ( identifier[iterable] , identifier[name] , identifier[metric] ) | def each(iterable=None, *, name=None, metric=call_default):
"""Measure time elapsed to produce each item of an iterable
:arg iterable: any iterable
:arg function metric: f(name, 1, time)
:arg str name: name for the metric
"""
if iterable is None:
return _each_decorator(name, metric) # depends on [control=['if'], data=[]]
else:
return _do_each(iterable, name, metric) |
def dump_links(self, o):
    """Dump links for an object version.

    Always emits 'self' and 'version' links; the 'self' link carries the
    versionId parameter only for non-head (or deleted) versions. The
    'uploads' link is added only for the live head version.
    """
    version_args = {'versionId': o.version_id}
    common = dict(bucket_id=o.bucket_id, key=o.key, _external=True)
    is_live_head = o.is_head and not o.deleted
    # The canonical 'self' URL omits versionId for the live head version.
    self_args = dict(common)
    if not is_live_head:
        self_args.update(version_args)
    links = {
        'self': url_for('.object_api', **self_args),
        'version': url_for('.object_api', **version_args, **common),
    }
    if is_live_head:
        links['uploads'] = url_for('.object_api', **common) + '?uploads'
    return links
constant[Dump links.]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b19cab60>], [<ast.Attribute object at 0x7da1b19cbe80>]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b19cbc10>, <ast.Constant object at 0x7da1b19cbd60>], [<ast.Call object at 0x7da1b19b2020>, <ast.Call object at 0x7da1b19b15d0>]]
if <ast.BoolOp object at 0x7da1b19b17e0> begin[:]
call[name[data].update, parameter[dictionary[[<ast.Constant object at 0x7da1b19b1b70>], [<ast.BinOp object at 0x7da1b19b3520>]]]]
return[name[data]] | keyword[def] identifier[dump_links] ( identifier[self] , identifier[o] ):
literal[string]
identifier[params] ={ literal[string] : identifier[o] . identifier[version_id] }
identifier[data] ={
literal[string] : identifier[url_for] (
literal[string] ,
identifier[bucket_id] = identifier[o] . identifier[bucket_id] ,
identifier[key] = identifier[o] . identifier[key] ,
identifier[_external] = keyword[True] ,
**( identifier[params] keyword[if] keyword[not] identifier[o] . identifier[is_head] keyword[or] identifier[o] . identifier[deleted] keyword[else] {})
),
literal[string] : identifier[url_for] (
literal[string] ,
identifier[bucket_id] = identifier[o] . identifier[bucket_id] ,
identifier[key] = identifier[o] . identifier[key] ,
identifier[_external] = keyword[True] ,
** identifier[params]
)
}
keyword[if] identifier[o] . identifier[is_head] keyword[and] keyword[not] identifier[o] . identifier[deleted] :
identifier[data] . identifier[update] ({ literal[string] : identifier[url_for] (
literal[string] ,
identifier[bucket_id] = identifier[o] . identifier[bucket_id] ,
identifier[key] = identifier[o] . identifier[key] ,
identifier[_external] = keyword[True]
)+ literal[string] ,})
keyword[return] identifier[data] | def dump_links(self, o):
"""Dump links."""
params = {'versionId': o.version_id}
data = {'self': url_for('.object_api', bucket_id=o.bucket_id, key=o.key, _external=True, **params if not o.is_head or o.deleted else {}), 'version': url_for('.object_api', bucket_id=o.bucket_id, key=o.key, _external=True, **params)}
if o.is_head and (not o.deleted):
data.update({'uploads': url_for('.object_api', bucket_id=o.bucket_id, key=o.key, _external=True) + '?uploads'}) # depends on [control=['if'], data=[]]
return data |
def strip_suffix(string, suffix, regex=False):
    """Strip the suffix from the string.
    If 'regex' is specified, suffix is understood as a regular expression."""
    # Validate both arguments up front; the error message lists both types.
    if not (isinstance(string, six.string_types) and isinstance(suffix, six.string_types)):
        msg = 'Arguments to strip_suffix must be string types. Are: {s}, {p}'\
            .format(s=type(string), p=type(suffix))
        raise TypeError(msg)
    # Literal suffixes are escaped so they match verbatim as a regex.
    pattern = suffix if regex else re.escape(suffix)
    # Anchor the pattern at the end of the string unless already anchored.
    if not pattern.endswith('$'):
        pattern = '({s})$'.format(s=pattern)
    return _strip(string, pattern)
constant[Strip the suffix from the string.
If 'regex' is specified, suffix is understood as a regular expression.]
if <ast.BoolOp object at 0x7da1b0fadab0> begin[:]
variable[msg] assign[=] call[constant[Arguments to strip_suffix must be string types. Are: {s}, {p}].format, parameter[]]
<ast.Raise object at 0x7da1b0facb20>
if <ast.UnaryOp object at 0x7da1b0fafc40> begin[:]
variable[suffix] assign[=] call[name[re].escape, parameter[name[suffix]]]
if <ast.UnaryOp object at 0x7da1b0faea10> begin[:]
variable[suffix] assign[=] call[constant[({s})$].format, parameter[]]
return[call[name[_strip], parameter[name[string], name[suffix]]]] | keyword[def] identifier[strip_suffix] ( identifier[string] , identifier[suffix] , identifier[regex] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[string] , identifier[six] . identifier[string_types] ) keyword[or] keyword[not] identifier[isinstance] ( identifier[suffix] , identifier[six] . identifier[string_types] ):
identifier[msg] = literal[string] . identifier[format] ( identifier[s] = identifier[type] ( identifier[string] ), identifier[p] = identifier[type] ( identifier[suffix] ))
keyword[raise] identifier[TypeError] ( identifier[msg] )
keyword[if] keyword[not] identifier[regex] :
identifier[suffix] = identifier[re] . identifier[escape] ( identifier[suffix] )
keyword[if] keyword[not] identifier[suffix] . identifier[endswith] ( literal[string] ):
identifier[suffix] = literal[string] . identifier[format] ( identifier[s] = identifier[suffix] )
keyword[return] identifier[_strip] ( identifier[string] , identifier[suffix] ) | def strip_suffix(string, suffix, regex=False):
"""Strip the suffix from the string.
If 'regex' is specified, suffix is understood as a regular expression."""
if not isinstance(string, six.string_types) or not isinstance(suffix, six.string_types):
msg = 'Arguments to strip_suffix must be string types. Are: {s}, {p}'.format(s=type(string), p=type(suffix))
raise TypeError(msg) # depends on [control=['if'], data=[]]
if not regex:
suffix = re.escape(suffix) # depends on [control=['if'], data=[]]
if not suffix.endswith('$'):
suffix = '({s})$'.format(s=suffix) # depends on [control=['if'], data=[]]
return _strip(string, suffix) |
def shell(self, term='xterm'):
    """ Start an interactive shell session
    This method invokes a shell on the remote SSH server and proxies
    traffic to/from both peers.
    You must connect to a SSH server using ssh_connect()
    prior to starting the session.

    :param term: terminal type requested from the remote server
        (default: 'xterm')
    """
    # Open a shell channel on the established SSH transport.
    channel = self._ssh.invoke_shell(term)
    # Relay data between the local peer and the remote channel; blocks
    # until the session ends.
    self._bridge(channel)
    channel.close()
constant[ Start an interactive shell session
This method invokes a shell on the remote SSH server and proxies
traffic to/from both peers.
You must connect to a SSH server using ssh_connect()
prior to starting the session.
]
variable[channel] assign[=] call[name[self]._ssh.invoke_shell, parameter[name[term]]]
call[name[self]._bridge, parameter[name[channel]]]
call[name[channel].close, parameter[]] | keyword[def] identifier[shell] ( identifier[self] , identifier[term] = literal[string] ):
literal[string]
identifier[channel] = identifier[self] . identifier[_ssh] . identifier[invoke_shell] ( identifier[term] )
identifier[self] . identifier[_bridge] ( identifier[channel] )
identifier[channel] . identifier[close] () | def shell(self, term='xterm'):
""" Start an interactive shell session
This method invokes a shell on the remote SSH server and proxies
traffic to/from both peers.
You must connect to a SSH server using ssh_connect()
prior to starting the session.
"""
channel = self._ssh.invoke_shell(term)
self._bridge(channel)
channel.close() |
def getVariantSet(self, id_):
    """
    Returns the variant set with the specified compound ID, resolved
    through its owning dataset.
    """
    # The compound ID embeds the dataset ID; parse it out first so the
    # lookup can be delegated to the right dataset.
    parsed_id = datamodel.VariantSetCompoundId.parse(id_)
    owning_dataset = self.getDataset(parsed_id.dataset_id)
    return owning_dataset.getVariantSet(id_)
return dataset.getVariantSet(id_) | def function[getVariantSet, parameter[self, id_]]:
constant[
Returns the readgroup set with the specified ID.
]
variable[compoundId] assign[=] call[name[datamodel].VariantSetCompoundId.parse, parameter[name[id_]]]
variable[dataset] assign[=] call[name[self].getDataset, parameter[name[compoundId].dataset_id]]
return[call[name[dataset].getVariantSet, parameter[name[id_]]]] | keyword[def] identifier[getVariantSet] ( identifier[self] , identifier[id_] ):
literal[string]
identifier[compoundId] = identifier[datamodel] . identifier[VariantSetCompoundId] . identifier[parse] ( identifier[id_] )
identifier[dataset] = identifier[self] . identifier[getDataset] ( identifier[compoundId] . identifier[dataset_id] )
keyword[return] identifier[dataset] . identifier[getVariantSet] ( identifier[id_] ) | def getVariantSet(self, id_):
"""
Returns the readgroup set with the specified ID.
"""
compoundId = datamodel.VariantSetCompoundId.parse(id_)
dataset = self.getDataset(compoundId.dataset_id)
return dataset.getVariantSet(id_) |
def plot_sim(ax, cell, synapse, grid_electrode, point_electrode, letter='a'):
    '''Create the simulation summary figure.

    Draws the cell morphology with log-scaled contours of the maximum
    absolute extracellular potential from ``grid_electrode``, plus two
    inset axes showing the point-electrode potentials and the synaptic
    current over time.

    Note: the ``ax`` argument is immediately replaced by a freshly
    created axes; it is kept only for signature compatibility.

    Relies on module-level globals ``X``, ``Z`` (contour grid) and
    ``point_electrode_parameters`` (electrode positions).

    Returns the created matplotlib figure.
    '''
    fig = plt.figure(figsize=(3.27*2/3, 3.27*2/3))
    ax = fig.add_axes([.1, .05, .9, .9], aspect='equal', frameon=False)
    phlp.annotate_subplot(ax, ncols=1, nrows=1, letter=letter, fontsize=16)
    cax = fig.add_axes([0.8, 0.2, 0.02, 0.2], frameon=False)

    # Peak |LFP| over time at every grid point, reshaped onto the contour grid.
    LFP = np.max(np.abs(grid_electrode.LFP), 1).reshape(X.shape)
    im = ax.contour(X, Z, np.log10(LFP),
                    50,
                    cmap='RdBu',
                    linewidths=1.5,
                    zorder=-2)

    cbar = fig.colorbar(im, cax=cax)
    # raw string: avoid invalid escape sequences like '\p' under Python 3
    cbar.set_label(r'$|\phi(\mathbf{r}, t)|_\mathrm{max}$ (nV)')
    cbar.outline.set_visible(False)
    # get some log-linear tickmarks and ticklabels
    ticks = np.arange(np.ceil(np.log10(LFP.min())), np.ceil(np.log10(LFP.max())))
    cbar.set_ticks(ticks)
    cbar.set_ticklabels(10.**ticks * 1E6)  # mV -> nV

    # Morphology drawn as filled polygons, one per compartment.
    zips = []
    for x, z in cell.get_idx_polygons():
        # materialize the pairs so they survive multiple traversals (py3 zip
        # is a one-shot iterator)
        zips.append(list(zip(x, z)))
    polycol = PolyCollection(zips,
                             edgecolors='k',
                             linewidths=0.5,
                             facecolors='k')
    ax.add_collection(polycol)

    # Scale bar: 100 um.
    ax.plot([100, 200], [-400, -400], 'k', lw=1, clip_on=False)
    ax.text(150, -470, r'100$\mu$m', va='center', ha='center')
    ax.axis('off')

    # Synapse location marker.
    ax.plot(cell.xmid[cell.synidx], cell.zmid[cell.synidx], 'o', ms=5,
            markeredgecolor='k',
            markerfacecolor='r')

    # Point-electrode position markers, color-matched to the traces below.
    color_vec = ['blue', 'green']
    for i in range(2):  # range (not py2 xrange) for Python 3 compatibility
        ax.plot(point_electrode_parameters['x'][i],
                point_electrode_parameters['z'][i], 'o', ms=6,
                markeredgecolor='none',
                markerfacecolor=color_vec[i])

    # Inset: extracellular potential at the two point electrodes (V -> nV).
    plt.axes([.11, .075, .25, .2])
    plt.plot(cell.tvec, point_electrode.LFP[0]*1e6, color=color_vec[0], clip_on=False)
    plt.plot(cell.tvec, point_electrode.LFP[1]*1e6, color=color_vec[1], clip_on=False)
    plt.axis('tight')
    ax = plt.gca()
    ax.set_ylabel(r'$\phi(\mathbf{r}, t)$ (nV)')  # rotation='horizontal')
    ax.set_xlabel('$t$ (ms)', va='center')
    # items() (not py2 iteritems()) for Python 3 compatibility
    for loc, spine in ax.spines.items():
        if loc in ['right', 'top']:
            spine.set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')

    # Inset: synaptic current (nA -> pA).
    plt.axes([.11, 0.285, .25, .2])
    plt.plot(cell.tvec, synapse.i*1E3, color='red', clip_on=False)
    plt.axis('tight')
    ax = plt.gca()
    ax.set_ylabel(r'$I_{i, j}(t)$ (pA)', ha='center', va='center')  # , rotation='horizontal')
    for loc, spine in ax.spines.items():
        if loc in ['right', 'top']:
            spine.set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.set_xticklabels([])

    return fig
constant[create a plot]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[ax] assign[=] call[name[fig].add_axes, parameter[list[[<ast.Constant object at 0x7da1b0bdf7f0>, <ast.Constant object at 0x7da1b0bdf7c0>, <ast.Constant object at 0x7da1b0bdf790>, <ast.Constant object at 0x7da1b0bdf760>]]]]
call[name[phlp].annotate_subplot, parameter[name[ax]]]
variable[cax] assign[=] call[name[fig].add_axes, parameter[list[[<ast.Constant object at 0x7da1b0bdf2b0>, <ast.Constant object at 0x7da1b0bdf280>, <ast.Constant object at 0x7da1b0bdf250>, <ast.Constant object at 0x7da1b0bdf220>]]]]
variable[LFP] assign[=] call[call[name[np].max, parameter[call[name[np].abs, parameter[name[grid_electrode].LFP]], constant[1]]].reshape, parameter[name[X].shape]]
variable[im] assign[=] call[name[ax].contour, parameter[name[X], name[Z], call[name[np].log10, parameter[name[LFP]]], constant[50]]]
variable[cbar] assign[=] call[name[fig].colorbar, parameter[name[im]]]
call[name[cbar].set_label, parameter[constant[$|\phi(\mathbf{r}, t)|_\mathrm{max}$ (nV)]]]
call[name[cbar].outline.set_visible, parameter[constant[False]]]
variable[ticks] assign[=] call[name[np].arange, parameter[call[name[np].ceil, parameter[call[name[np].log10, parameter[call[name[LFP].min, parameter[]]]]]], call[name[np].ceil, parameter[call[name[np].log10, parameter[call[name[LFP].max, parameter[]]]]]]]]
call[name[cbar].set_ticks, parameter[name[ticks]]]
call[name[cbar].set_ticklabels, parameter[binary_operation[binary_operation[constant[10.0] ** name[ticks]] * constant[1000000.0]]]]
variable[zips] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0bddf00>, <ast.Name object at 0x7da1b0bdded0>]]] in starred[call[name[cell].get_idx_polygons, parameter[]]] begin[:]
call[name[zips].append, parameter[call[name[zip], parameter[name[x], name[z]]]]]
variable[polycol] assign[=] call[name[PolyCollection], parameter[name[zips]]]
call[name[ax].add_collection, parameter[name[polycol]]]
call[name[ax].plot, parameter[list[[<ast.Constant object at 0x7da1b0bdd840>, <ast.Constant object at 0x7da1b0bdd810>]], list[[<ast.UnaryOp object at 0x7da1b0bdd7b0>, <ast.UnaryOp object at 0x7da1b0bdd750>]], constant[k]]]
call[name[ax].text, parameter[constant[150], <ast.UnaryOp object at 0x7da1b0bdc0a0>, constant[100$\mu$m]]]
call[name[ax].axis, parameter[constant[off]]]
call[name[ax].plot, parameter[call[name[cell].xmid][name[cell].synidx], call[name[cell].zmid][name[cell].synidx], constant[o]]]
variable[color_vec] assign[=] list[[<ast.Constant object at 0x7da1b0bdc7f0>, <ast.Constant object at 0x7da1b0bdc820>]]
for taget[name[i]] in starred[call[name[xrange], parameter[constant[2]]]] begin[:]
call[name[ax].plot, parameter[call[call[name[point_electrode_parameters]][constant[x]]][name[i]], call[call[name[point_electrode_parameters]][constant[z]]][name[i]], constant[o]]]
call[name[plt].axes, parameter[list[[<ast.Constant object at 0x7da1b0bdceb0>, <ast.Constant object at 0x7da1b0bdcee0>, <ast.Constant object at 0x7da1b0bdcf10>, <ast.Constant object at 0x7da1b0bdcf40>]]]]
call[name[plt].plot, parameter[name[cell].tvec, binary_operation[call[name[point_electrode].LFP][constant[0]] * constant[1000000.0]]]]
call[name[plt].plot, parameter[name[cell].tvec, binary_operation[call[name[point_electrode].LFP][constant[1]] * constant[1000000.0]]]]
call[name[plt].axis, parameter[constant[tight]]]
variable[ax] assign[=] call[name[plt].gca, parameter[]]
call[name[ax].set_ylabel, parameter[constant[$\phi(\mathbf{r}, t)$ (nV)]]]
call[name[ax].set_xlabel, parameter[constant[$t$ (ms)]]]
for taget[tuple[[<ast.Name object at 0x7da1b0baf970>, <ast.Name object at 0x7da1b0baf940>]]] in starred[call[name[ax].spines.iteritems, parameter[]]] begin[:]
if compare[name[loc] in list[[<ast.Constant object at 0x7da1b0baf790>, <ast.Constant object at 0x7da1b0baf760>]]] begin[:]
call[name[spine].set_color, parameter[constant[none]]]
call[name[ax].xaxis.set_ticks_position, parameter[constant[bottom]]]
call[name[ax].yaxis.set_ticks_position, parameter[constant[left]]]
call[name[plt].axes, parameter[list[[<ast.Constant object at 0x7da1b0baf280>, <ast.Constant object at 0x7da1b0baf250>, <ast.Constant object at 0x7da1b0baf220>, <ast.Constant object at 0x7da1b0baf1f0>]]]]
call[name[plt].plot, parameter[name[cell].tvec, binary_operation[name[synapse].i * constant[1000.0]]]]
call[name[plt].axis, parameter[constant[tight]]]
variable[ax] assign[=] call[name[plt].gca, parameter[]]
call[name[ax].set_ylabel, parameter[constant[$I_{i, j}(t)$ (pA)]]]
for taget[tuple[[<ast.Name object at 0x7da1b0baea70>, <ast.Name object at 0x7da1b0baea40>]]] in starred[call[name[ax].spines.iteritems, parameter[]]] begin[:]
if compare[name[loc] in list[[<ast.Constant object at 0x7da1b0bae890>, <ast.Constant object at 0x7da1b0bae860>]]] begin[:]
call[name[spine].set_color, parameter[constant[none]]]
call[name[ax].xaxis.set_ticks_position, parameter[constant[bottom]]]
call[name[ax].yaxis.set_ticks_position, parameter[constant[left]]]
call[name[ax].set_xticklabels, parameter[list[[]]]]
return[name[fig]] | keyword[def] identifier[plot_sim] ( identifier[ax] , identifier[cell] , identifier[synapse] , identifier[grid_electrode] , identifier[point_electrode] , identifier[letter] = literal[string] ):
literal[string]
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] / literal[int] , literal[int] * literal[int] / literal[int] ))
identifier[ax] = identifier[fig] . identifier[add_axes] ([ literal[int] , literal[int] , literal[int] , literal[int] ], identifier[aspect] = literal[string] , identifier[frameon] = keyword[False] )
identifier[phlp] . identifier[annotate_subplot] ( identifier[ax] , identifier[ncols] = literal[int] , identifier[nrows] = literal[int] , identifier[letter] = identifier[letter] , identifier[fontsize] = literal[int] )
identifier[cax] = identifier[fig] . identifier[add_axes] ([ literal[int] , literal[int] , literal[int] , literal[int] ], identifier[frameon] = keyword[False] )
identifier[LFP] = identifier[np] . identifier[max] ( identifier[np] . identifier[abs] ( identifier[grid_electrode] . identifier[LFP] ), literal[int] ). identifier[reshape] ( identifier[X] . identifier[shape] )
identifier[im] = identifier[ax] . identifier[contour] ( identifier[X] , identifier[Z] , identifier[np] . identifier[log10] ( identifier[LFP] ),
literal[int] ,
identifier[cmap] = literal[string] ,
identifier[linewidths] = literal[int] ,
identifier[zorder] =- literal[int] )
identifier[cbar] = identifier[fig] . identifier[colorbar] ( identifier[im] , identifier[cax] = identifier[cax] )
identifier[cbar] . identifier[set_label] ( literal[string] )
identifier[cbar] . identifier[outline] . identifier[set_visible] ( keyword[False] )
identifier[ticks] = identifier[np] . identifier[arange] ( identifier[np] . identifier[ceil] ( identifier[np] . identifier[log10] ( identifier[LFP] . identifier[min] ())), identifier[np] . identifier[ceil] ( identifier[np] . identifier[log10] ( identifier[LFP] . identifier[max] ())))
identifier[cbar] . identifier[set_ticks] ( identifier[ticks] )
identifier[cbar] . identifier[set_ticklabels] ( literal[int] ** identifier[ticks] * literal[int] )
identifier[zips] =[]
keyword[for] identifier[x] , identifier[z] keyword[in] identifier[cell] . identifier[get_idx_polygons] ():
identifier[zips] . identifier[append] ( identifier[zip] ( identifier[x] , identifier[z] ))
identifier[polycol] = identifier[PolyCollection] ( identifier[zips] ,
identifier[edgecolors] = literal[string] ,
identifier[linewidths] = literal[int] ,
identifier[facecolors] = literal[string] )
identifier[ax] . identifier[add_collection] ( identifier[polycol] )
identifier[ax] . identifier[plot] ([ literal[int] , literal[int] ],[- literal[int] ,- literal[int] ], literal[string] , identifier[lw] = literal[int] , identifier[clip_on] = keyword[False] )
identifier[ax] . identifier[text] ( literal[int] ,- literal[int] , literal[string] , identifier[va] = literal[string] , identifier[ha] = literal[string] )
identifier[ax] . identifier[axis] ( literal[string] )
identifier[ax] . identifier[plot] ( identifier[cell] . identifier[xmid] [ identifier[cell] . identifier[synidx] ], identifier[cell] . identifier[zmid] [ identifier[cell] . identifier[synidx] ], literal[string] , identifier[ms] = literal[int] ,
identifier[markeredgecolor] = literal[string] ,
identifier[markerfacecolor] = literal[string] )
identifier[color_vec] =[ literal[string] , literal[string] ]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] ):
identifier[ax] . identifier[plot] ( identifier[point_electrode_parameters] [ literal[string] ][ identifier[i] ],
identifier[point_electrode_parameters] [ literal[string] ][ identifier[i] ], literal[string] , identifier[ms] = literal[int] ,
identifier[markeredgecolor] = literal[string] ,
identifier[markerfacecolor] = identifier[color_vec] [ identifier[i] ])
identifier[plt] . identifier[axes] ([ literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[plt] . identifier[plot] ( identifier[cell] . identifier[tvec] , identifier[point_electrode] . identifier[LFP] [ literal[int] ]* literal[int] , identifier[color] = identifier[color_vec] [ literal[int] ], identifier[clip_on] = keyword[False] )
identifier[plt] . identifier[plot] ( identifier[cell] . identifier[tvec] , identifier[point_electrode] . identifier[LFP] [ literal[int] ]* literal[int] , identifier[color] = identifier[color_vec] [ literal[int] ], identifier[clip_on] = keyword[False] )
identifier[plt] . identifier[axis] ( literal[string] )
identifier[ax] = identifier[plt] . identifier[gca] ()
identifier[ax] . identifier[set_ylabel] ( literal[string] )
identifier[ax] . identifier[set_xlabel] ( literal[string] , identifier[va] = literal[string] )
keyword[for] identifier[loc] , identifier[spine] keyword[in] identifier[ax] . identifier[spines] . identifier[iteritems] ():
keyword[if] identifier[loc] keyword[in] [ literal[string] , literal[string] ]:
identifier[spine] . identifier[set_color] ( literal[string] )
identifier[ax] . identifier[xaxis] . identifier[set_ticks_position] ( literal[string] )
identifier[ax] . identifier[yaxis] . identifier[set_ticks_position] ( literal[string] )
identifier[plt] . identifier[axes] ([ literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[plt] . identifier[plot] ( identifier[cell] . identifier[tvec] , identifier[synapse] . identifier[i] * literal[int] , identifier[color] = literal[string] , identifier[clip_on] = keyword[False] )
identifier[plt] . identifier[axis] ( literal[string] )
identifier[ax] = identifier[plt] . identifier[gca] ()
identifier[ax] . identifier[set_ylabel] ( literal[string] , identifier[ha] = literal[string] , identifier[va] = literal[string] )
keyword[for] identifier[loc] , identifier[spine] keyword[in] identifier[ax] . identifier[spines] . identifier[iteritems] ():
keyword[if] identifier[loc] keyword[in] [ literal[string] , literal[string] ]:
identifier[spine] . identifier[set_color] ( literal[string] )
identifier[ax] . identifier[xaxis] . identifier[set_ticks_position] ( literal[string] )
identifier[ax] . identifier[yaxis] . identifier[set_ticks_position] ( literal[string] )
identifier[ax] . identifier[set_xticklabels] ([])
keyword[return] identifier[fig] | def plot_sim(ax, cell, synapse, grid_electrode, point_electrode, letter='a'):
"""create a plot"""
fig = plt.figure(figsize=(3.27 * 2 / 3, 3.27 * 2 / 3))
ax = fig.add_axes([0.1, 0.05, 0.9, 0.9], aspect='equal', frameon=False)
phlp.annotate_subplot(ax, ncols=1, nrows=1, letter=letter, fontsize=16)
cax = fig.add_axes([0.8, 0.2, 0.02, 0.2], frameon=False)
LFP = np.max(np.abs(grid_electrode.LFP), 1).reshape(X.shape)
im = ax.contour(X, Z, np.log10(LFP), 50, cmap='RdBu', linewidths=1.5, zorder=-2)
cbar = fig.colorbar(im, cax=cax)
cbar.set_label('$|\\phi(\\mathbf{r}, t)|_\\mathrm{max}$ (nV)')
cbar.outline.set_visible(False)
#get some log-linear tickmarks and ticklabels
ticks = np.arange(np.ceil(np.log10(LFP.min())), np.ceil(np.log10(LFP.max())))
cbar.set_ticks(ticks)
cbar.set_ticklabels(10.0 ** ticks * 1000000.0) #mv -> nV
zips = []
for (x, z) in cell.get_idx_polygons():
zips.append(zip(x, z)) # depends on [control=['for'], data=[]]
polycol = PolyCollection(zips, edgecolors='k', linewidths=0.5, facecolors='k')
ax.add_collection(polycol)
ax.plot([100, 200], [-400, -400], 'k', lw=1, clip_on=False)
ax.text(150, -470, '100$\\mu$m', va='center', ha='center')
ax.axis('off')
ax.plot(cell.xmid[cell.synidx], cell.zmid[cell.synidx], 'o', ms=5, markeredgecolor='k', markerfacecolor='r')
color_vec = ['blue', 'green']
for i in xrange(2):
ax.plot(point_electrode_parameters['x'][i], point_electrode_parameters['z'][i], 'o', ms=6, markeredgecolor='none', markerfacecolor=color_vec[i]) # depends on [control=['for'], data=['i']]
plt.axes([0.11, 0.075, 0.25, 0.2])
plt.plot(cell.tvec, point_electrode.LFP[0] * 1000000.0, color=color_vec[0], clip_on=False)
plt.plot(cell.tvec, point_electrode.LFP[1] * 1000000.0, color=color_vec[1], clip_on=False)
plt.axis('tight')
ax = plt.gca()
ax.set_ylabel('$\\phi(\\mathbf{r}, t)$ (nV)') #rotation='horizontal')
ax.set_xlabel('$t$ (ms)', va='center')
for (loc, spine) in ax.spines.iteritems():
if loc in ['right', 'top']:
spine.set_color('none') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.axes([0.11, 0.285, 0.25, 0.2])
plt.plot(cell.tvec, synapse.i * 1000.0, color='red', clip_on=False)
plt.axis('tight')
ax = plt.gca()
ax.set_ylabel('$I_{i, j}(t)$ (pA)', ha='center', va='center') #, rotation='horizontal')
for (loc, spine) in ax.spines.iteritems():
if loc in ['right', 'top']:
spine.set_color('none') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xticklabels([])
return fig |
def singleton(class_):
    """Decorator turning *class_* into a singleton.

    The first call constructs the instance (forwarding any arguments);
    every later call returns that same cached instance, ignoring its
    arguments. (Method 1 from
    https://stackoverflow.com/questions/6760685/creating-a-singleton-in-python)
    """
    cache = {}

    def get_instance(*args, **kwargs):
        # EAFP: the cache hit is the common case after the first call.
        try:
            return cache[class_]
        except KeyError:
            cache[class_] = class_(*args, **kwargs)
            return cache[class_]

    return get_instance
constant[Singleton definition.
Method 1 from
https://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
]
variable[instances] assign[=] dictionary[[], []]
def function[get_instance, parameter[]]:
if compare[name[class_] <ast.NotIn object at 0x7da2590d7190> name[instances]] begin[:]
call[name[instances]][name[class_]] assign[=] call[name[class_], parameter[<ast.Starred object at 0x7da204565060>]]
return[call[name[instances]][name[class_]]]
return[name[get_instance]] | keyword[def] identifier[singleton] ( identifier[class_] ):
literal[string]
identifier[instances] ={}
keyword[def] identifier[get_instance] (* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[class_] keyword[not] keyword[in] identifier[instances] :
identifier[instances] [ identifier[class_] ]= identifier[class_] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[instances] [ identifier[class_] ]
keyword[return] identifier[get_instance] | def singleton(class_):
"""Singleton definition.
Method 1 from
https://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
"""
instances = {}
def get_instance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs) # depends on [control=['if'], data=['class_', 'instances']]
return instances[class_]
return get_instance |
def _nominal_kernel(x, y, out):
"""Number of features that match exactly"""
for i in range(x.shape[0]):
for j in range(y.shape[0]):
out[i, j] += (x[i, :] == y[j, :]).sum()
return out | def function[_nominal_kernel, parameter[x, y, out]]:
constant[Number of features that match exactly]
for taget[name[i]] in starred[call[name[range], parameter[call[name[x].shape][constant[0]]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[call[name[y].shape][constant[0]]]]] begin[:]
<ast.AugAssign object at 0x7da1b16b7940>
return[name[out]] | keyword[def] identifier[_nominal_kernel] ( identifier[x] , identifier[y] , identifier[out] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[x] . identifier[shape] [ literal[int] ]):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[y] . identifier[shape] [ literal[int] ]):
identifier[out] [ identifier[i] , identifier[j] ]+=( identifier[x] [ identifier[i] ,:]== identifier[y] [ identifier[j] ,:]). identifier[sum] ()
keyword[return] identifier[out] | def _nominal_kernel(x, y, out):
"""Number of features that match exactly"""
for i in range(x.shape[0]):
for j in range(y.shape[0]):
out[i, j] += (x[i, :] == y[j, :]).sum() # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
return out |
def select_symbols(self, symbols, ret_list=False):
    """
    Return a :class:`PseudoTable` restricted to the given chemical symbols.

    Args:
        symbols: str or list of symbols.
            Prepend every symbol with "-" to *exclude* those pseudos
            instead of selecting them.
        ret_list: if True, return a plain list of pseudos rather than a
            :class:`PseudoTable`.
    """
    symbols = list_strings(symbols)
    exclude = symbols[0].startswith("-")

    if exclude:
        # Exclusion mode must be all-or-nothing: mixing "-X" with "Y"
        # would be ambiguous.
        if any(not s.startswith("-") for s in symbols):
            raise ValueError("When excluding symbols, all strings must start with `-`")
        symbols = [s[1:] for s in symbols]

    wanted = set(symbols)
    if exclude:
        selected = [p for p in self if p.symbol not in wanted]
    else:
        selected = [p for p in self if p.symbol in wanted]

    if ret_list:
        return selected
    return self.__class__(selected)
constant[
Return a :class:`PseudoTable` with the pseudopotentials with the given list of chemical symbols.
Args:
symbols: str or list of symbols
Prepend the symbol string with "-", to exclude pseudos.
ret_list: if True a list of pseudos is returned instead of a :class:`PseudoTable`
]
variable[symbols] assign[=] call[name[list_strings], parameter[name[symbols]]]
variable[exclude] assign[=] call[call[name[symbols]][constant[0]].startswith, parameter[constant[-]]]
if name[exclude] begin[:]
if <ast.UnaryOp object at 0x7da2047eae60> begin[:]
<ast.Raise object at 0x7da20c6e5360>
variable[symbols] assign[=] <ast.ListComp object at 0x7da20c6e76a0>
variable[symbols] assign[=] call[name[set], parameter[name[symbols]]]
variable[pseudos] assign[=] list[[]]
for taget[name[p]] in starred[name[self]] begin[:]
if name[exclude] begin[:]
if compare[name[p].symbol in name[symbols]] begin[:]
continue
call[name[pseudos].append, parameter[name[p]]]
if name[ret_list] begin[:]
return[name[pseudos]] | keyword[def] identifier[select_symbols] ( identifier[self] , identifier[symbols] , identifier[ret_list] = keyword[False] ):
literal[string]
identifier[symbols] = identifier[list_strings] ( identifier[symbols] )
identifier[exclude] = identifier[symbols] [ literal[int] ]. identifier[startswith] ( literal[string] )
keyword[if] identifier[exclude] :
keyword[if] keyword[not] identifier[all] ( identifier[s] . identifier[startswith] ( literal[string] ) keyword[for] identifier[s] keyword[in] identifier[symbols] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[symbols] =[ identifier[s] [ literal[int] :] keyword[for] identifier[s] keyword[in] identifier[symbols] ]
identifier[symbols] = identifier[set] ( identifier[symbols] )
identifier[pseudos] =[]
keyword[for] identifier[p] keyword[in] identifier[self] :
keyword[if] identifier[exclude] :
keyword[if] identifier[p] . identifier[symbol] keyword[in] identifier[symbols] : keyword[continue]
keyword[else] :
keyword[if] identifier[p] . identifier[symbol] keyword[not] keyword[in] identifier[symbols] : keyword[continue]
identifier[pseudos] . identifier[append] ( identifier[p] )
keyword[if] identifier[ret_list] :
keyword[return] identifier[pseudos]
keyword[else] :
keyword[return] identifier[self] . identifier[__class__] ( identifier[pseudos] ) | def select_symbols(self, symbols, ret_list=False):
"""
Return a :class:`PseudoTable` with the pseudopotentials with the given list of chemical symbols.
Args:
symbols: str or list of symbols
Prepend the symbol string with "-", to exclude pseudos.
ret_list: if True a list of pseudos is returned instead of a :class:`PseudoTable`
"""
symbols = list_strings(symbols)
exclude = symbols[0].startswith('-')
if exclude:
if not all((s.startswith('-') for s in symbols)):
raise ValueError('When excluding symbols, all strings must start with `-`') # depends on [control=['if'], data=[]]
symbols = [s[1:] for s in symbols] # depends on [control=['if'], data=[]]
symbols = set(symbols)
pseudos = []
for p in self:
if exclude:
if p.symbol in symbols:
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif p.symbol not in symbols:
continue # depends on [control=['if'], data=[]]
pseudos.append(p) # depends on [control=['for'], data=['p']]
if ret_list:
return pseudos # depends on [control=['if'], data=[]]
else:
return self.__class__(pseudos) |
def _get_html_response(url, session):
    # type: (str, PipSession) -> Response
    """Access an HTML page with GET, and return the response.

    Three steps:

    1. If the URL looks like an archive, probe it with a HEAD request
       first so a large file is never downloaded just to discover it is
       not HTML. Raises `_NotHTTP` when the content type cannot be
       determined and `_NotHTML` when it is not HTML.
    2. Perform the GET itself; network failures surface as HTTP
       exceptions.
    3. Verify the Content-Type of the response is HTML, raising
       `_NotHTML` otherwise.
    """
    if _is_url_like_archive(url):
        _ensure_html_response(url, session=session)

    logger.debug('Getting page %s', url)

    request_headers = {
        "Accept": "text/html",
        # Cached /simple/ data would break the common
        # "twine upload && pip install" flow when the upload happened
        # within the last ~10 minutes. max-age=0 forces revalidation
        # while (unlike no-cache) still allowing conditional requests,
        # so unchanged pages cost only the conditional-GET round trip.
        # See pypa/pip#5670 for the full discussion.
        "Cache-Control": "max-age=0",
    }
    response = session.get(url, headers=request_headers)
    response.raise_for_status()

    # The HEAD probe above only fires for archive-looking URLs, and a URL
    # is not required to reveal its content type. Short of HEAD-ing every
    # URL we cannot know in advance, so re-check the header now that the
    # body has been fetched.
    _ensure_html_header(response)

    return response
constant[Access an HTML page with GET, and return the response.
This consists of three parts:
1. If the URL looks suspiciously like an archive, send a HEAD first to
check the Content-Type is HTML, to avoid downloading a large file.
Raise `_NotHTTP` if the content type cannot be determined, or
`_NotHTML` if it is not HTML.
2. Actually perform the request. Raise HTTP exceptions on network failures.
3. Check the Content-Type header to make sure we got HTML, and raise
`_NotHTML` otherwise.
]
if call[name[_is_url_like_archive], parameter[name[url]]] begin[:]
call[name[_ensure_html_response], parameter[name[url]]]
call[name[logger].debug, parameter[constant[Getting page %s], name[url]]]
variable[resp] assign[=] call[name[session].get, parameter[name[url]]]
call[name[resp].raise_for_status, parameter[]]
call[name[_ensure_html_header], parameter[name[resp]]]
return[name[resp]] | keyword[def] identifier[_get_html_response] ( identifier[url] , identifier[session] ):
literal[string]
keyword[if] identifier[_is_url_like_archive] ( identifier[url] ):
identifier[_ensure_html_response] ( identifier[url] , identifier[session] = identifier[session] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[url] )
identifier[resp] = identifier[session] . identifier[get] (
identifier[url] ,
identifier[headers] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
},
)
identifier[resp] . identifier[raise_for_status] ()
identifier[_ensure_html_header] ( identifier[resp] )
keyword[return] identifier[resp] | def _get_html_response(url, session):
# type: (str, PipSession) -> Response
'Access an HTML page with GET, and return the response.\n\n This consists of three parts:\n\n 1. If the URL looks suspiciously like an archive, send a HEAD first to\n check the Content-Type is HTML, to avoid downloading a large file.\n Raise `_NotHTTP` if the content type cannot be determined, or\n `_NotHTML` if it is not HTML.\n 2. Actually perform the request. Raise HTTP exceptions on network failures.\n 3. Check the Content-Type header to make sure we got HTML, and raise\n `_NotHTML` otherwise.\n '
if _is_url_like_archive(url):
_ensure_html_response(url, session=session) # depends on [control=['if'], data=[]]
logger.debug('Getting page %s', url)
# We don't want to blindly returned cached data for
# /simple/, because authors generally expecting that
# twine upload && pip install will function, but if
# they've done a pip install in the last ~10 minutes
# it won't. Thus by setting this to zero we will not
# blindly use any cached data, however the benefit of
# using max-age=0 instead of no-cache, is that we will
# still support conditional requests, so we will still
# minimize traffic sent in cases where the page hasn't
# changed at all, we will just always incur the round
# trip for the conditional GET now instead of only
# once per 10 minutes.
# For more information, please see pypa/pip#5670.
resp = session.get(url, headers={'Accept': 'text/html', 'Cache-Control': 'max-age=0'})
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
_ensure_html_header(resp)
return resp |
def check_anagrad(fun, x0, epsilon, threshold):
    """Verify an analytical gradient against central finite differences.

    Arguments:
     | ``fun``  -- the function under test, see below
     | ``x0``  -- the reference point at which the gradient is checked
     | ``epsilon``  -- small step used for the finite differences
     | ``threshold``  -- the largest tolerated deviation between the
                         analytical and the numerical gradient

    ``fun`` must accept a mandatory argument ``x`` and an optional
    keyword ``do_gradient``: with ``do_gradient=False`` (the default) it
    returns only the function value; with ``do_gradient=True`` it
    returns a 2-tuple of the function value and the gradient.

    Raises AssertionError when any gradient component deviates from the
    central finite difference by more than ``threshold``.
    """
    half_step = 0.5 * epsilon
    # Only the gradient is needed here; the function value is discarded.
    _, analytical = fun(x0, do_gradient=True)
    for component in range(len(x0)):
        forward = x0.copy()
        forward[component] += half_step
        backward = x0.copy()
        backward[component] -= half_step
        # Central difference: (f(x + eps/2) - f(x - eps/2)) / eps
        numerical = (fun(forward) - fun(backward)) / epsilon
        if abs(numerical - analytical[component]) > threshold:
            raise AssertionError("Error in the analytical gradient, component %i, got %s, should be about %s" % (component, analytical[component], numerical))
constant[Check the analytical gradient using finite differences
Arguments:
| ``fun`` -- the function to be tested, more info below
| ``x0`` -- the reference point around which the function should be
tested
| ``epsilon`` -- a small scalar used for the finite differences
| ``threshold`` -- the maximum acceptable difference between the
analytical gradient and the gradient obtained by
finite differentiation
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- the arguments of the function to be tested
| ``do_gradient`` -- When False, only the function value is returned.
When True, a 2-tuple with the function value and
the gradient are returned [default=False]
]
variable[N] assign[=] call[name[len], parameter[name[x0]]]
<ast.Tuple object at 0x7da207f02e90> assign[=] call[name[fun], parameter[name[x0]]]
for taget[name[i]] in starred[call[name[range], parameter[name[N]]]] begin[:]
variable[xh] assign[=] call[name[x0].copy, parameter[]]
<ast.AugAssign object at 0x7da207f033a0>
variable[xl] assign[=] call[name[x0].copy, parameter[]]
<ast.AugAssign object at 0x7da207f00850>
variable[num_grad_comp] assign[=] binary_operation[binary_operation[call[name[fun], parameter[name[xh]]] - call[name[fun], parameter[name[xl]]]] / name[epsilon]]
if compare[call[name[abs], parameter[binary_operation[name[num_grad_comp] - call[name[ana_grad]][name[i]]]]] greater[>] name[threshold]] begin[:]
<ast.Raise object at 0x7da207f00760> | keyword[def] identifier[check_anagrad] ( identifier[fun] , identifier[x0] , identifier[epsilon] , identifier[threshold] ):
literal[string]
identifier[N] = identifier[len] ( identifier[x0] )
identifier[f0] , identifier[ana_grad] = identifier[fun] ( identifier[x0] , identifier[do_gradient] = keyword[True] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[N] ):
identifier[xh] = identifier[x0] . identifier[copy] ()
identifier[xh] [ identifier[i] ]+= literal[int] * identifier[epsilon]
identifier[xl] = identifier[x0] . identifier[copy] ()
identifier[xl] [ identifier[i] ]-= literal[int] * identifier[epsilon]
identifier[num_grad_comp] =( identifier[fun] ( identifier[xh] )- identifier[fun] ( identifier[xl] ))/ identifier[epsilon]
keyword[if] identifier[abs] ( identifier[num_grad_comp] - identifier[ana_grad] [ identifier[i] ])> identifier[threshold] :
keyword[raise] identifier[AssertionError] ( literal[string] %( identifier[i] , identifier[ana_grad] [ identifier[i] ], identifier[num_grad_comp] )) | def check_anagrad(fun, x0, epsilon, threshold):
"""Check the analytical gradient using finite differences
Arguments:
| ``fun`` -- the function to be tested, more info below
| ``x0`` -- the reference point around which the function should be
tested
| ``epsilon`` -- a small scalar used for the finite differences
| ``threshold`` -- the maximum acceptable difference between the
analytical gradient and the gradient obtained by
finite differentiation
The function ``fun`` takes a mandatory argument ``x`` and an optional
argument ``do_gradient``:
| ``x`` -- the arguments of the function to be tested
| ``do_gradient`` -- When False, only the function value is returned.
When True, a 2-tuple with the function value and
the gradient are returned [default=False]
"""
N = len(x0)
(f0, ana_grad) = fun(x0, do_gradient=True)
for i in range(N):
xh = x0.copy()
xh[i] += 0.5 * epsilon
xl = x0.copy()
xl[i] -= 0.5 * epsilon
num_grad_comp = (fun(xh) - fun(xl)) / epsilon
if abs(num_grad_comp - ana_grad[i]) > threshold:
raise AssertionError('Error in the analytical gradient, component %i, got %s, should be about %s' % (i, ana_grad[i], num_grad_comp)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] |
def search_and_extract_nucleotides_matching_nucleotide_database(self,
                                                                unpack,
                                                                euk_check,
                                                                search_method,
                                                                maximum_range,
                                                                threads,
                                                                evalue,
                                                                hmmsearch_output_table,
                                                                hit_reads_fasta):
        '''As per nt_db_search() except slightly lower level. Search an
        input read set (unpack) and then extract the sequences that hit.

        Parameters
        ----------
        unpack: object
            wrapper around the raw input reads; must provide ``read_file``
            and ``format()``
        euk_check: bool
            if True, screen hits for eukaryotic contamination and drop
            contaminated reads
        search_method: str
            "hmmsearch" (the only method currently supported for
            nucleotide databases)
        maximum_range: int or falsy
            span used to group hits on the same read; if falsy, only hit
            directions are recorded
        threads: int
            number of threads for the search
        evalue:
            e-value cutoff for the search
        hmmsearch_output_table: str
            path to hmmsearch output table
        hit_reads_fasta: str
            path to hit nucleotide sequences

        Returns
        -------
        direction_information: dict
            {read_1: False
            ...
            read n: True}
            where True = Forward direction
            and False = Reverse direction
        result: DBSearchResult object containing file locations and hit
            information
        '''
        if search_method == "hmmsearch":
            # First search the reads using the HMM
            search_result, table_list = self.nhmmer(
                hmmsearch_output_table,
                unpack,
                threads,
                evalue
            )
        elif search_method == 'diamond':
            raise Exception("Diamond searches not supported for nucelotide databases yet")
        else:
            # Fail fast with a clear message rather than hitting a
            # NameError on search_result further down.
            raise Exception("Unknown search method: %s" % search_method)

        if maximum_range:
            hits = self._get_read_names(
                search_result,  # define the span of hits
                maximum_range
            )
        else:
            hits = self._get_sequence_directions(search_result)

        hit_readnames = hits.keys()
        if euk_check:
            euk_reads = self._check_euk_contamination(table_list)
            hit_readnames = set([read for read in hit_readnames if read not in euk_reads])
            # .items() rather than .iteritems(): the latter does not exist
            # on Python 3 dicts (and .items() also works on Python 2).
            hits = {key: item for key, item in hits.items() if key in hit_readnames}
            hit_read_count = [len(euk_reads), len(hit_readnames)]
        else:
            hit_read_count = [0, len(hit_readnames)]

        hit_reads_fasta, direction_information = self._extract_from_raw_reads(
            hit_reads_fasta,
            hit_readnames,
            unpack.read_file,
            unpack.format(),
            hits
        )

        if not hit_readnames:
            # No hits at all: record the counts, but there is no sequence
            # file to report.
            result = DBSearchResult(None,
                                    search_result,
                                    hit_read_count,
                                    None)
        else:
            slash_endings = self._check_for_slash_endings(hit_readnames)
            result = DBSearchResult(hit_reads_fasta,
                                    search_result,
                                    hit_read_count,
                                    slash_endings)

        if maximum_range:
            # With ranges, each read may carry several stranded hits.
            n_hits = sum([len(x["strand"]) for x in hits.values()])
        else:
            n_hits = len(hits)
        logging.info("%s read(s) detected" % n_hits)

        return result, direction_information
constant[As per nt_db_search() except slightly lower level. Search an
input read set (unpack) and then extract the sequences that hit.
Parameters
----------
hmmsearch_output_table: str
path to hmmsearch output table
hit_reads_fasta: str
path to hit nucleotide sequences
Returns
-------
direction_information: dict
{read_1: False
...
read n: True}
where True = Forward direction
and False = Reverse direction
result: DBSearchResult object containing file locations and hit
information
]
if compare[name[search_method] equal[==] constant[hmmsearch]] begin[:]
<ast.Tuple object at 0x7da18f720b50> assign[=] call[name[self].nhmmer, parameter[name[hmmsearch_output_table], name[unpack], name[threads], name[evalue]]]
if name[maximum_range] begin[:]
variable[hits] assign[=] call[name[self]._get_read_names, parameter[name[search_result], name[maximum_range]]]
variable[hit_readnames] assign[=] call[name[hits].keys, parameter[]]
if name[euk_check] begin[:]
variable[euk_reads] assign[=] call[name[self]._check_euk_contamination, parameter[name[table_list]]]
variable[hit_readnames] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da18fe937f0>]]
variable[hits] assign[=] <ast.DictComp object at 0x7da18fe93dc0>
variable[hit_read_count] assign[=] list[[<ast.Call object at 0x7da18fe921d0>, <ast.Call object at 0x7da18fe93040>]]
<ast.Tuple object at 0x7da18fe90130> assign[=] call[name[self]._extract_from_raw_reads, parameter[name[hit_reads_fasta], name[hit_readnames], name[unpack].read_file, call[name[unpack].format, parameter[]], name[hits]]]
if <ast.UnaryOp object at 0x7da18fe90190> begin[:]
variable[result] assign[=] call[name[DBSearchResult], parameter[constant[None], name[search_result], name[hit_read_count], constant[None]]]
if name[maximum_range] begin[:]
variable[n_hits] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da18fe90e50>]]
call[name[logging].info, parameter[binary_operation[constant[%s read(s) detected] <ast.Mod object at 0x7da2590d6920> name[n_hits]]]]
return[tuple[[<ast.Name object at 0x7da18fe92470>, <ast.Name object at 0x7da18fe91240>]]] | keyword[def] identifier[search_and_extract_nucleotides_matching_nucleotide_database] ( identifier[self] ,
identifier[unpack] ,
identifier[euk_check] ,
identifier[search_method] ,
identifier[maximum_range] ,
identifier[threads] ,
identifier[evalue] ,
identifier[hmmsearch_output_table] ,
identifier[hit_reads_fasta] ):
literal[string]
keyword[if] identifier[search_method] == literal[string] :
identifier[search_result] , identifier[table_list] = identifier[self] . identifier[nhmmer] (
identifier[hmmsearch_output_table] ,
identifier[unpack] ,
identifier[threads] ,
identifier[evalue]
)
keyword[elif] identifier[search_method] == literal[string] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[maximum_range] :
identifier[hits] = identifier[self] . identifier[_get_read_names] (
identifier[search_result] ,
identifier[maximum_range]
)
keyword[else] :
identifier[hits] = identifier[self] . identifier[_get_sequence_directions] ( identifier[search_result] )
identifier[hit_readnames] = identifier[hits] . identifier[keys] ()
keyword[if] identifier[euk_check] :
identifier[euk_reads] = identifier[self] . identifier[_check_euk_contamination] ( identifier[table_list] )
identifier[hit_readnames] = identifier[set] ([ identifier[read] keyword[for] identifier[read] keyword[in] identifier[hit_readnames] keyword[if] identifier[read] keyword[not] keyword[in] identifier[euk_reads] ])
identifier[hits] ={ identifier[key] : identifier[item] keyword[for] identifier[key] , identifier[item] keyword[in] identifier[hits] . identifier[iteritems] () keyword[if] identifier[key] keyword[in] identifier[hit_readnames] }
identifier[hit_read_count] =[ identifier[len] ( identifier[euk_reads] ), identifier[len] ( identifier[hit_readnames] )]
keyword[else] :
identifier[hit_read_count] =[ literal[int] , identifier[len] ( identifier[hit_readnames] )]
identifier[hit_reads_fasta] , identifier[direction_information] = identifier[self] . identifier[_extract_from_raw_reads] (
identifier[hit_reads_fasta] ,
identifier[hit_readnames] ,
identifier[unpack] . identifier[read_file] ,
identifier[unpack] . identifier[format] (),
identifier[hits]
)
keyword[if] keyword[not] identifier[hit_readnames] :
identifier[result] = identifier[DBSearchResult] ( keyword[None] ,
identifier[search_result] ,
identifier[hit_read_count] ,
keyword[None] )
keyword[else] :
identifier[slash_endings] = identifier[self] . identifier[_check_for_slash_endings] ( identifier[hit_readnames] )
identifier[result] = identifier[DBSearchResult] ( identifier[hit_reads_fasta] ,
identifier[search_result] ,
identifier[hit_read_count] ,
identifier[slash_endings] )
keyword[if] identifier[maximum_range] :
identifier[n_hits] = identifier[sum] ([ identifier[len] ( identifier[x] [ literal[string] ]) keyword[for] identifier[x] keyword[in] identifier[hits] . identifier[values] ()])
keyword[else] :
identifier[n_hits] = identifier[len] ( identifier[hits] )
identifier[logging] . identifier[info] ( literal[string] % identifier[n_hits] )
keyword[return] identifier[result] , identifier[direction_information] | def search_and_extract_nucleotides_matching_nucleotide_database(self, unpack, euk_check, search_method, maximum_range, threads, evalue, hmmsearch_output_table, hit_reads_fasta):
"""As per nt_db_search() except slightly lower level. Search an
input read set (unpack) and then extract the sequences that hit.
Parameters
----------
hmmsearch_output_table: str
path to hmmsearch output table
hit_reads_fasta: str
path to hit nucleotide sequences
Returns
-------
direction_information: dict
{read_1: False
...
read n: True}
where True = Forward direction
and False = Reverse direction
result: DBSearchResult object containing file locations and hit
information
"""
if search_method == 'hmmsearch':
# First search the reads using the HMM
(search_result, table_list) = self.nhmmer(hmmsearch_output_table, unpack, threads, evalue) # depends on [control=['if'], data=[]]
elif search_method == 'diamond':
raise Exception('Diamond searches not supported for nucelotide databases yet') # depends on [control=['if'], data=[]]
if maximum_range: # define the span of hits
hits = self._get_read_names(search_result, maximum_range) # depends on [control=['if'], data=[]]
else:
hits = self._get_sequence_directions(search_result)
hit_readnames = hits.keys()
if euk_check:
euk_reads = self._check_euk_contamination(table_list)
hit_readnames = set([read for read in hit_readnames if read not in euk_reads])
hits = {key: item for (key, item) in hits.iteritems() if key in hit_readnames}
hit_read_count = [len(euk_reads), len(hit_readnames)] # depends on [control=['if'], data=[]]
else:
hit_read_count = [0, len(hit_readnames)]
(hit_reads_fasta, direction_information) = self._extract_from_raw_reads(hit_reads_fasta, hit_readnames, unpack.read_file, unpack.format(), hits)
if not hit_readnames:
result = DBSearchResult(None, search_result, hit_read_count, None) # depends on [control=['if'], data=[]]
else:
slash_endings = self._check_for_slash_endings(hit_readnames)
result = DBSearchResult(hit_reads_fasta, search_result, hit_read_count, slash_endings)
if maximum_range:
n_hits = sum([len(x['strand']) for x in hits.values()]) # depends on [control=['if'], data=[]]
else:
n_hits = len(hits)
logging.info('%s read(s) detected' % n_hits)
return (result, direction_information) |
def logmid_n(max_n, ratio=1/4.0, nsteps=15):
    """Return up to ``nsteps`` unique integers, logarithmically spaced over
    the central band of the log-scale between 1 and ``max_n``.

    The logarithms of the returned values lie in an interval of width
    ``ratio * log(max_n)`` centred at ``log(max_n) / 2`` (e.g. for
    ratio=1/2.0 they lie between 0.25 * log(max_n) and 0.75 * log(max_n)).
    For very small max_n and/or very large nsteps the rounding may
    produce duplicates, which are removed from the output.

    This is useful in hurst_rs, because it avoids both very small and
    very large n -- both of which can cause problems -- while still
    producing a logarithmically spaced sequence.

    Args:
      max_n (int):
        largest possible output value (should be the sequence length when
        used in hurst_rs)
    Kwargs:
      ratio (float):
        relative width of the central logarithmic band
      nsteps (float):
        (maximum) number of values to take from the specified range
    Returns:
      array of int:
        sorted, unique, logarithmically spaced values (at most nsteps of
        them)
    """
    log_total = np.log(max_n)
    band = log_total * ratio
    band_start = log_total * (1 - ratio) * 0.5
    # Evenly spaced exponents across the central band.
    exponents = band_start + 1.0 * np.arange(nsteps) / nsteps * band
    candidates = np.round(np.exp(exponents)).astype("int32")
    return np.unique(candidates)
constant[
Creates an array of integers that lie evenly spaced in the "middle" of the
logarithmic scale from 0 to log(max_n).
If max_n is very small and/or nsteps is very large, this may lead to
duplicate values which will be removed from the output.
This function has benefits in hurst_rs, because it cuts away both very small
and very large n, which both can cause problems, and still produces a
logarithmically spaced sequence.
Args:
max_n (int):
largest possible output value (should be the sequence length when used in
hurst_rs)
Kwargs:
ratio (float):
width of the "middle" of the logarithmic interval relative to log(max_n).
For example, for ratio=1/2.0 the logarithm of the resulting values will
lie between 0.25 * log(max_n) and 0.75 * log(max_n).
nsteps (float):
(maximum) number of values to take from the specified range
Returns:
array of int:
a logarithmically spaced sequence of at most nsteps values (may be less,
because only unique values are returned)
]
variable[l] assign[=] call[name[np].log, parameter[name[max_n]]]
variable[span] assign[=] binary_operation[name[l] * name[ratio]]
variable[start] assign[=] binary_operation[binary_operation[name[l] * binary_operation[constant[1] - name[ratio]]] * constant[0.5]]
variable[midrange] assign[=] binary_operation[name[start] + binary_operation[binary_operation[binary_operation[constant[1.0] * call[name[np].arange, parameter[name[nsteps]]]] / name[nsteps]] * name[span]]]
variable[nvals] assign[=] call[call[name[np].round, parameter[call[name[np].exp, parameter[name[midrange]]]]].astype, parameter[constant[int32]]]
return[call[name[np].unique, parameter[name[nvals]]]] | keyword[def] identifier[logmid_n] ( identifier[max_n] , identifier[ratio] = literal[int] / literal[int] , identifier[nsteps] = literal[int] ):
literal[string]
identifier[l] = identifier[np] . identifier[log] ( identifier[max_n] )
identifier[span] = identifier[l] * identifier[ratio]
identifier[start] = identifier[l] *( literal[int] - identifier[ratio] )* literal[int]
identifier[midrange] = identifier[start] + literal[int] * identifier[np] . identifier[arange] ( identifier[nsteps] )/ identifier[nsteps] * identifier[span]
identifier[nvals] = identifier[np] . identifier[round] ( identifier[np] . identifier[exp] ( identifier[midrange] )). identifier[astype] ( literal[string] )
keyword[return] identifier[np] . identifier[unique] ( identifier[nvals] ) | def logmid_n(max_n, ratio=1 / 4.0, nsteps=15):
"""
Creates an array of integers that lie evenly spaced in the "middle" of the
logarithmic scale from 0 to log(max_n).
If max_n is very small and/or nsteps is very large, this may lead to
duplicate values which will be removed from the output.
This function has benefits in hurst_rs, because it cuts away both very small
and very large n, which both can cause problems, and still produces a
logarithmically spaced sequence.
Args:
max_n (int):
largest possible output value (should be the sequence length when used in
hurst_rs)
Kwargs:
ratio (float):
width of the "middle" of the logarithmic interval relative to log(max_n).
For example, for ratio=1/2.0 the logarithm of the resulting values will
lie between 0.25 * log(max_n) and 0.75 * log(max_n).
nsteps (float):
(maximum) number of values to take from the specified range
Returns:
array of int:
a logarithmically spaced sequence of at most nsteps values (may be less,
because only unique values are returned)
"""
l = np.log(max_n)
span = l * ratio
start = l * (1 - ratio) * 0.5
midrange = start + 1.0 * np.arange(nsteps) / nsteps * span
nvals = np.round(np.exp(midrange)).astype('int32')
return np.unique(nvals) |
def login_required(fn):
    """Decorator requiring a valid Google OAuth session.

    Wraps an aiohttp handler so that it is invoked as
    ``fn(request, user, **kwargs)``, where ``user`` is the
    aioauth_client User object obtained from Google.

    If the session carries no OAuth token, or the token no longer yields
    user info, the client is redirected to ``cfg.oauth_redirect_path``
    to (re-)authenticate.
    """
    async def wrapped(request, **kwargs):
        # NOTE(review): assumes 'token' was stored in the aiohttp session
        # by the OAuth callback handler -- not visible in this file.
        session = await get_session(request)
        if 'token' not in session:
            # Not logged in yet: bounce to the OAuth entry point.
            return web.HTTPFound(cfg.oauth_redirect_path)
        client = GoogleClient(
            client_id=cfg.client_id,
            client_secret=cfg.client_secret,
            access_token=session['token']
        )
        try:
            # Validate the token by fetching the user's profile.
            user, info = await client.user_info()
        except Exception:
            # Token expired/revoked (or any API failure): force re-auth.
            return web.HTTPFound(cfg.oauth_redirect_path)
        return await fn(request, user, **kwargs)
    return wrapped
constant[auth decorator
call function(request, user: <aioauth_client User object>)
]
<ast.AsyncFunctionDef object at 0x7da1b06a0790>
return[name[wrapped]] | keyword[def] identifier[login_required] ( identifier[fn] ):
literal[string]
keyword[async] keyword[def] identifier[wrapped] ( identifier[request] ,** identifier[kwargs] ):
identifier[session] = keyword[await] identifier[get_session] ( identifier[request] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[session] :
keyword[return] identifier[web] . identifier[HTTPFound] ( identifier[cfg] . identifier[oauth_redirect_path] )
identifier[client] = identifier[GoogleClient] (
identifier[client_id] = identifier[cfg] . identifier[client_id] ,
identifier[client_secret] = identifier[cfg] . identifier[client_secret] ,
identifier[access_token] = identifier[session] [ literal[string] ]
)
keyword[try] :
identifier[user] , identifier[info] = keyword[await] identifier[client] . identifier[user_info] ()
keyword[except] identifier[Exception] :
keyword[return] identifier[web] . identifier[HTTPFound] ( identifier[cfg] . identifier[oauth_redirect_path] )
keyword[return] keyword[await] identifier[fn] ( identifier[request] , identifier[user] ,** identifier[kwargs] )
keyword[return] identifier[wrapped] | def login_required(fn):
"""auth decorator
call function(request, user: <aioauth_client User object>)
"""
async def wrapped(request, **kwargs):
session = await get_session(request)
if 'token' not in session:
return web.HTTPFound(cfg.oauth_redirect_path) # depends on [control=['if'], data=[]]
client = GoogleClient(client_id=cfg.client_id, client_secret=cfg.client_secret, access_token=session['token'])
try:
(user, info) = await client.user_info() # depends on [control=['try'], data=[]]
except Exception:
return web.HTTPFound(cfg.oauth_redirect_path) # depends on [control=['except'], data=[]]
return await fn(request, user, **kwargs)
return wrapped |
def debug(*args, **kwargs):
    """Print the arguments to stderr with a 'debug: ' prefix.

    Fallback used when printdebug is not installed; `main()` replaces
    this with `printdebug.debug` when it is available.

    Accepts the same keyword arguments as ``print``; a missing or
    ``None`` ``file`` is replaced with ``sys.stderr``.
    """
    if kwargs.get('file') is None:
        # Also replaces an explicit file=None (which print would send
        # to stdout instead).
        kwargs['file'] = sys.stderr
    joiner = kwargs.get('sep', ' ')
    text = joiner.join(str(piece) for piece in args)
    print('debug: {}'.format(text), **kwargs)
constant[ Just prints to stderr, unless printdebug is installed. Then it
will be replaced in `main()` by `printdebug.debug`.
]
if compare[call[name[kwargs].get, parameter[constant[file], constant[None]]] is constant[None]] begin[:]
call[name[kwargs]][constant[file]] assign[=] name[sys].stderr
variable[msg] assign[=] call[call[name[kwargs].get, parameter[constant[sep], constant[ ]]].join, parameter[<ast.GeneratorExp object at 0x7da1b031e7d0>]]
call[name[print], parameter[call[constant[debug: {}].format, parameter[name[msg]]]]] | keyword[def] identifier[debug] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] ) keyword[is] keyword[None] :
identifier[kwargs] [ literal[string] ]= identifier[sys] . identifier[stderr]
identifier[msg] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ). identifier[join] ( identifier[str] ( identifier[a] ) keyword[for] identifier[a] keyword[in] identifier[args] )
identifier[print] ( literal[string] . identifier[format] ( identifier[msg] ),** identifier[kwargs] ) | def debug(*args, **kwargs):
""" Just prints to stderr, unless printdebug is installed. Then it
will be replaced in `main()` by `printdebug.debug`.
"""
if kwargs.get('file', None) is None:
kwargs['file'] = sys.stderr # depends on [control=['if'], data=[]]
msg = kwargs.get('sep', ' ').join((str(a) for a in args))
print('debug: {}'.format(msg), **kwargs) |
def parse(self, m, prefix=None):
        """Parse branch notification messages sent by Launchpad.

        ``m`` is an email message object (mapping-style header access,
        body readable via ``body_line_iterator``).  ``prefix`` is
        accepted for interface compatibility and unused here.

        Returns ``('bzr', change_dict)`` when both a revision number and
        a committer could be extracted from the message body, otherwise
        ``None`` (falls through to the final ``return None``).
        """
        subject = m["subject"]
        # The branch name is carried in the subject, e.g. "[Branch <name>] ...".
        match = re.search(r"^\s*\[Branch\s+([^]]+)\]", subject)
        if match:
            repository = match.group(1)
        else:
            repository = None
        # Put these into a dictionary, otherwise we cannot assign them
        # from nested function definitions.
        d = {'files': [], 'comments': ""}
        gobbler = None  # currently-active section consumer (one of the gobble_* helpers)
        rev = None
        author = None
        when = util.now()  # fallback timestamp if none is found in the body
        # Each gobble_* helper consumes one continuation line of the body
        # section it is named after, accumulating into d.
        def gobble_comment(s):
            d['comments'] += s + "\n"
        def gobble_removed(s):
            d['files'].append('%s REMOVED' % s)
        def gobble_added(s):
            d['files'].append('%s ADDED' % s)
        def gobble_modified(s):
            d['files'].append('%s MODIFIED' % s)
        def gobble_renamed(s):
            # Rename lines look like "old_name => new_name".
            match = re.search(r"^(.+) => (.+)$", s)
            if match:
                d['files'].append('%s RENAMED %s' %
                                  (match.group(1), match.group(2)))
            else:
                d['files'].append('%s RENAMED' % s)
        lines = list(body_line_iterator(m, True))
        rev = None  # NOTE(review): redundant -- rev is already None above
        while lines:
            # Body lines arrive as bytes; decode leniently.
            line = str(lines.pop(0), "utf-8", errors="ignore")
            # revno: 101
            match = re.search(r"^revno: ([0-9.]+)", line)
            if match:
                rev = match.group(1)
            # committer: Joe <joe@acme.com>
            match = re.search(r"^committer: (.*)$", line)
            if match:
                author = match.group(1)
            # timestamp: Fri 2009-05-15 10:35:43 +0200
            # datetime.strptime() is supposed to support %z for time zone, but
            # it does not seem to work. So handle the time zone manually.
            match = re.search(
                r"^timestamp: [a-zA-Z]{3} (\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) ([-+])(\d{2})(\d{2})$", line)
            if match:
                datestr = match.group(1)
                tz_sign = match.group(2)
                tz_hours = match.group(3)
                tz_minutes = match.group(4)
                when = parseLaunchpadDate(
                    datestr, tz_sign, tz_hours, tz_minutes)
            # Section headers switch the active gobbler; indented lines are
            # fed to whichever gobbler is active.
            if re.search(r"^message:\s*$", line):
                gobbler = gobble_comment
            elif re.search(r"^removed:\s*$", line):
                gobbler = gobble_removed
            elif re.search(r"^added:\s*$", line):
                gobbler = gobble_added
            elif re.search(r"^renamed:\s*$", line):
                gobbler = gobble_renamed
            elif re.search(r"^modified:\s*$", line):
                gobbler = gobble_modified
            elif re.search(r"^ ", line) and gobbler:
                gobbler(line[2:-1]) # Use :-1 to gobble trailing newline
        # Determine the name of the branch.
        branch = None
        if self.branchMap and repository:
            # Explicit mapping wins; try both bare and "lp:"-prefixed keys.
            if repository in self.branchMap:
                branch = self.branchMap[repository]
            elif ("lp:" + repository) in self.branchMap:
                branch = self.branchMap['lp:' + repository]
        if not branch:
            if self.defaultBranch:
                branch = self.defaultBranch
            else:
                if repository:
                    branch = 'lp:' + repository
                else:
                    branch = None
        # Only report a change when both a revision and a committer were found.
        if rev and author:
            return ('bzr', dict(author=author, files=d['files'],
                                comments=d['comments'],
                                when=when, revision=rev,
                                branch=branch, repository=repository or ''))
return None | def function[parse, parameter[self, m, prefix]]:
constant[Parse branch notification messages sent by Launchpad.
]
variable[subject] assign[=] call[name[m]][constant[subject]]
variable[match] assign[=] call[name[re].search, parameter[constant[^\s*\[Branch\s+([^]]+)\]], name[subject]]]
if name[match] begin[:]
variable[repository] assign[=] call[name[match].group, parameter[constant[1]]]
variable[d] assign[=] dictionary[[<ast.Constant object at 0x7da18f8133a0>, <ast.Constant object at 0x7da18f813670>], [<ast.List object at 0x7da18f8131c0>, <ast.Constant object at 0x7da18f812a40>]]
variable[gobbler] assign[=] constant[None]
variable[rev] assign[=] constant[None]
variable[author] assign[=] constant[None]
variable[when] assign[=] call[name[util].now, parameter[]]
def function[gobble_comment, parameter[s]]:
<ast.AugAssign object at 0x7da18f8115d0>
def function[gobble_removed, parameter[s]]:
call[call[name[d]][constant[files]].append, parameter[binary_operation[constant[%s REMOVED] <ast.Mod object at 0x7da2590d6920> name[s]]]]
def function[gobble_added, parameter[s]]:
call[call[name[d]][constant[files]].append, parameter[binary_operation[constant[%s ADDED] <ast.Mod object at 0x7da2590d6920> name[s]]]]
def function[gobble_modified, parameter[s]]:
call[call[name[d]][constant[files]].append, parameter[binary_operation[constant[%s MODIFIED] <ast.Mod object at 0x7da2590d6920> name[s]]]]
def function[gobble_renamed, parameter[s]]:
variable[match] assign[=] call[name[re].search, parameter[constant[^(.+) => (.+)$], name[s]]]
if name[match] begin[:]
call[call[name[d]][constant[files]].append, parameter[binary_operation[constant[%s RENAMED %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18f813310>, <ast.Call object at 0x7da18f810d90>]]]]]
variable[lines] assign[=] call[name[list], parameter[call[name[body_line_iterator], parameter[name[m], constant[True]]]]]
variable[rev] assign[=] constant[None]
while name[lines] begin[:]
variable[line] assign[=] call[name[str], parameter[call[name[lines].pop, parameter[constant[0]]], constant[utf-8]]]
variable[match] assign[=] call[name[re].search, parameter[constant[^revno: ([0-9.]+)], name[line]]]
if name[match] begin[:]
variable[rev] assign[=] call[name[match].group, parameter[constant[1]]]
variable[match] assign[=] call[name[re].search, parameter[constant[^committer: (.*)$], name[line]]]
if name[match] begin[:]
variable[author] assign[=] call[name[match].group, parameter[constant[1]]]
variable[match] assign[=] call[name[re].search, parameter[constant[^timestamp: [a-zA-Z]{3} (\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) ([-+])(\d{2})(\d{2})$], name[line]]]
if name[match] begin[:]
variable[datestr] assign[=] call[name[match].group, parameter[constant[1]]]
variable[tz_sign] assign[=] call[name[match].group, parameter[constant[2]]]
variable[tz_hours] assign[=] call[name[match].group, parameter[constant[3]]]
variable[tz_minutes] assign[=] call[name[match].group, parameter[constant[4]]]
variable[when] assign[=] call[name[parseLaunchpadDate], parameter[name[datestr], name[tz_sign], name[tz_hours], name[tz_minutes]]]
if call[name[re].search, parameter[constant[^message:\s*$], name[line]]] begin[:]
variable[gobbler] assign[=] name[gobble_comment]
variable[branch] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18f8123e0> begin[:]
if compare[name[repository] in name[self].branchMap] begin[:]
variable[branch] assign[=] call[name[self].branchMap][name[repository]]
if <ast.UnaryOp object at 0x7da2054a5060> begin[:]
if name[self].defaultBranch begin[:]
variable[branch] assign[=] name[self].defaultBranch
if <ast.BoolOp object at 0x7da2054a76d0> begin[:]
return[tuple[[<ast.Constant object at 0x7da2054a7d60>, <ast.Call object at 0x7da2054a65c0>]]]
return[constant[None]] | keyword[def] identifier[parse] ( identifier[self] , identifier[m] , identifier[prefix] = keyword[None] ):
literal[string]
identifier[subject] = identifier[m] [ literal[string] ]
identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[subject] )
keyword[if] identifier[match] :
identifier[repository] = identifier[match] . identifier[group] ( literal[int] )
keyword[else] :
identifier[repository] = keyword[None]
identifier[d] ={ literal[string] :[], literal[string] : literal[string] }
identifier[gobbler] = keyword[None]
identifier[rev] = keyword[None]
identifier[author] = keyword[None]
identifier[when] = identifier[util] . identifier[now] ()
keyword[def] identifier[gobble_comment] ( identifier[s] ):
identifier[d] [ literal[string] ]+= identifier[s] + literal[string]
keyword[def] identifier[gobble_removed] ( identifier[s] ):
identifier[d] [ literal[string] ]. identifier[append] ( literal[string] % identifier[s] )
keyword[def] identifier[gobble_added] ( identifier[s] ):
identifier[d] [ literal[string] ]. identifier[append] ( literal[string] % identifier[s] )
keyword[def] identifier[gobble_modified] ( identifier[s] ):
identifier[d] [ literal[string] ]. identifier[append] ( literal[string] % identifier[s] )
keyword[def] identifier[gobble_renamed] ( identifier[s] ):
identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[s] )
keyword[if] identifier[match] :
identifier[d] [ literal[string] ]. identifier[append] ( literal[string] %
( identifier[match] . identifier[group] ( literal[int] ), identifier[match] . identifier[group] ( literal[int] )))
keyword[else] :
identifier[d] [ literal[string] ]. identifier[append] ( literal[string] % identifier[s] )
identifier[lines] = identifier[list] ( identifier[body_line_iterator] ( identifier[m] , keyword[True] ))
identifier[rev] = keyword[None]
keyword[while] identifier[lines] :
identifier[line] = identifier[str] ( identifier[lines] . identifier[pop] ( literal[int] ), literal[string] , identifier[errors] = literal[string] )
identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[line] )
keyword[if] identifier[match] :
identifier[rev] = identifier[match] . identifier[group] ( literal[int] )
identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[line] )
keyword[if] identifier[match] :
identifier[author] = identifier[match] . identifier[group] ( literal[int] )
identifier[match] = identifier[re] . identifier[search] (
literal[string] , identifier[line] )
keyword[if] identifier[match] :
identifier[datestr] = identifier[match] . identifier[group] ( literal[int] )
identifier[tz_sign] = identifier[match] . identifier[group] ( literal[int] )
identifier[tz_hours] = identifier[match] . identifier[group] ( literal[int] )
identifier[tz_minutes] = identifier[match] . identifier[group] ( literal[int] )
identifier[when] = identifier[parseLaunchpadDate] (
identifier[datestr] , identifier[tz_sign] , identifier[tz_hours] , identifier[tz_minutes] )
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[line] ):
identifier[gobbler] = identifier[gobble_comment]
keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[line] ):
identifier[gobbler] = identifier[gobble_removed]
keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[line] ):
identifier[gobbler] = identifier[gobble_added]
keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[line] ):
identifier[gobbler] = identifier[gobble_renamed]
keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[line] ):
identifier[gobbler] = identifier[gobble_modified]
keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[line] ) keyword[and] identifier[gobbler] :
identifier[gobbler] ( identifier[line] [ literal[int] :- literal[int] ])
identifier[branch] = keyword[None]
keyword[if] identifier[self] . identifier[branchMap] keyword[and] identifier[repository] :
keyword[if] identifier[repository] keyword[in] identifier[self] . identifier[branchMap] :
identifier[branch] = identifier[self] . identifier[branchMap] [ identifier[repository] ]
keyword[elif] ( literal[string] + identifier[repository] ) keyword[in] identifier[self] . identifier[branchMap] :
identifier[branch] = identifier[self] . identifier[branchMap] [ literal[string] + identifier[repository] ]
keyword[if] keyword[not] identifier[branch] :
keyword[if] identifier[self] . identifier[defaultBranch] :
identifier[branch] = identifier[self] . identifier[defaultBranch]
keyword[else] :
keyword[if] identifier[repository] :
identifier[branch] = literal[string] + identifier[repository]
keyword[else] :
identifier[branch] = keyword[None]
keyword[if] identifier[rev] keyword[and] identifier[author] :
keyword[return] ( literal[string] , identifier[dict] ( identifier[author] = identifier[author] , identifier[files] = identifier[d] [ literal[string] ],
identifier[comments] = identifier[d] [ literal[string] ],
identifier[when] = identifier[when] , identifier[revision] = identifier[rev] ,
identifier[branch] = identifier[branch] , identifier[repository] = identifier[repository] keyword[or] literal[string] ))
keyword[return] keyword[None] | def parse(self, m, prefix=None):
"""Parse branch notification messages sent by Launchpad.
"""
subject = m['subject']
match = re.search('^\\s*\\[Branch\\s+([^]]+)\\]', subject)
if match:
repository = match.group(1) # depends on [control=['if'], data=[]]
else:
repository = None
# Put these into a dictionary, otherwise we cannot assign them
# from nested function definitions.
d = {'files': [], 'comments': ''}
gobbler = None
rev = None
author = None
when = util.now()
def gobble_comment(s):
d['comments'] += s + '\n'
def gobble_removed(s):
d['files'].append('%s REMOVED' % s)
def gobble_added(s):
d['files'].append('%s ADDED' % s)
def gobble_modified(s):
d['files'].append('%s MODIFIED' % s)
def gobble_renamed(s):
match = re.search('^(.+) => (.+)$', s)
if match:
d['files'].append('%s RENAMED %s' % (match.group(1), match.group(2))) # depends on [control=['if'], data=[]]
else:
d['files'].append('%s RENAMED' % s)
lines = list(body_line_iterator(m, True))
rev = None
while lines:
line = str(lines.pop(0), 'utf-8', errors='ignore')
# revno: 101
match = re.search('^revno: ([0-9.]+)', line)
if match:
rev = match.group(1) # depends on [control=['if'], data=[]]
# committer: Joe <joe@acme.com>
match = re.search('^committer: (.*)$', line)
if match:
author = match.group(1) # depends on [control=['if'], data=[]]
# timestamp: Fri 2009-05-15 10:35:43 +0200
# datetime.strptime() is supposed to support %z for time zone, but
# it does not seem to work. So handle the time zone manually.
match = re.search('^timestamp: [a-zA-Z]{3} (\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}) ([-+])(\\d{2})(\\d{2})$', line)
if match:
datestr = match.group(1)
tz_sign = match.group(2)
tz_hours = match.group(3)
tz_minutes = match.group(4)
when = parseLaunchpadDate(datestr, tz_sign, tz_hours, tz_minutes) # depends on [control=['if'], data=[]]
if re.search('^message:\\s*$', line):
gobbler = gobble_comment # depends on [control=['if'], data=[]]
elif re.search('^removed:\\s*$', line):
gobbler = gobble_removed # depends on [control=['if'], data=[]]
elif re.search('^added:\\s*$', line):
gobbler = gobble_added # depends on [control=['if'], data=[]]
elif re.search('^renamed:\\s*$', line):
gobbler = gobble_renamed # depends on [control=['if'], data=[]]
elif re.search('^modified:\\s*$', line):
gobbler = gobble_modified # depends on [control=['if'], data=[]]
elif re.search('^ ', line) and gobbler:
gobbler(line[2:-1]) # Use :-1 to gobble trailing newline # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
# Determine the name of the branch.
branch = None
if self.branchMap and repository:
if repository in self.branchMap:
branch = self.branchMap[repository] # depends on [control=['if'], data=['repository']]
elif 'lp:' + repository in self.branchMap:
branch = self.branchMap['lp:' + repository] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not branch:
if self.defaultBranch:
branch = self.defaultBranch # depends on [control=['if'], data=[]]
elif repository:
branch = 'lp:' + repository # depends on [control=['if'], data=[]]
else:
branch = None # depends on [control=['if'], data=[]]
if rev and author:
return ('bzr', dict(author=author, files=d['files'], comments=d['comments'], when=when, revision=rev, branch=branch, repository=repository or '')) # depends on [control=['if'], data=[]]
return None |
def comment_out_magics(source):
    """
    Utility used to make sure AST parser does not choke on unrecognized
    magics.
    """
    def neutralize(line):
        # Prefix IPython magic lines ('%...') so they parse as comments.
        return '# ' + line if line.strip().startswith('%') else line

    return '\n'.join(neutralize(line) for line in source.splitlines())
constant[
Utility used to make sure AST parser does not choke on unrecognized
magics.
]
variable[filtered] assign[=] list[[]]
for taget[name[line]] in starred[call[name[source].splitlines, parameter[]]] begin[:]
if call[call[name[line].strip, parameter[]].startswith, parameter[constant[%]]] begin[:]
call[name[filtered].append, parameter[binary_operation[constant[# ] + name[line]]]]
return[call[constant[
].join, parameter[name[filtered]]]] | keyword[def] identifier[comment_out_magics] ( identifier[source] ):
literal[string]
identifier[filtered] =[]
keyword[for] identifier[line] keyword[in] identifier[source] . identifier[splitlines] ():
keyword[if] identifier[line] . identifier[strip] (). identifier[startswith] ( literal[string] ):
identifier[filtered] . identifier[append] ( literal[string] + identifier[line] )
keyword[else] :
identifier[filtered] . identifier[append] ( identifier[line] )
keyword[return] literal[string] . identifier[join] ( identifier[filtered] ) | def comment_out_magics(source):
"""
Utility used to make sure AST parser does not choke on unrecognized
magics.
"""
filtered = []
for line in source.splitlines():
if line.strip().startswith('%'):
filtered.append('# ' + line) # depends on [control=['if'], data=[]]
else:
filtered.append(line) # depends on [control=['for'], data=['line']]
return '\n'.join(filtered) |
def _from_dict(cls, _dict):
"""Initialize a QueryEvidence object from a json dictionary."""
args = {}
if 'document_id' in _dict:
args['document_id'] = _dict.get('document_id')
if 'field' in _dict:
args['field'] = _dict.get('field')
if 'start_offset' in _dict:
args['start_offset'] = _dict.get('start_offset')
if 'end_offset' in _dict:
args['end_offset'] = _dict.get('end_offset')
if 'entities' in _dict:
args['entities'] = [
QueryEvidenceEntity._from_dict(x)
for x in (_dict.get('entities'))
]
return cls(**args) | def function[_from_dict, parameter[cls, _dict]]:
constant[Initialize a QueryEvidence object from a json dictionary.]
variable[args] assign[=] dictionary[[], []]
if compare[constant[document_id] in name[_dict]] begin[:]
call[name[args]][constant[document_id]] assign[=] call[name[_dict].get, parameter[constant[document_id]]]
if compare[constant[field] in name[_dict]] begin[:]
call[name[args]][constant[field]] assign[=] call[name[_dict].get, parameter[constant[field]]]
if compare[constant[start_offset] in name[_dict]] begin[:]
call[name[args]][constant[start_offset]] assign[=] call[name[_dict].get, parameter[constant[start_offset]]]
if compare[constant[end_offset] in name[_dict]] begin[:]
call[name[args]][constant[end_offset]] assign[=] call[name[_dict].get, parameter[constant[end_offset]]]
if compare[constant[entities] in name[_dict]] begin[:]
call[name[args]][constant[entities]] assign[=] <ast.ListComp object at 0x7da18f09d3f0>
return[call[name[cls], parameter[]]] | keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ):
literal[string]
identifier[args] ={}
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]=[
identifier[QueryEvidenceEntity] . identifier[_from_dict] ( identifier[x] )
keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] ))
]
keyword[return] identifier[cls] (** identifier[args] ) | def _from_dict(cls, _dict):
"""Initialize a QueryEvidence object from a json dictionary."""
args = {}
if 'document_id' in _dict:
args['document_id'] = _dict.get('document_id') # depends on [control=['if'], data=['_dict']]
if 'field' in _dict:
args['field'] = _dict.get('field') # depends on [control=['if'], data=['_dict']]
if 'start_offset' in _dict:
args['start_offset'] = _dict.get('start_offset') # depends on [control=['if'], data=['_dict']]
if 'end_offset' in _dict:
args['end_offset'] = _dict.get('end_offset') # depends on [control=['if'], data=['_dict']]
if 'entities' in _dict:
args['entities'] = [QueryEvidenceEntity._from_dict(x) for x in _dict.get('entities')] # depends on [control=['if'], data=['_dict']]
return cls(**args) |
def connect_outgoing(self, taskspec, sequence_flow_id, sequence_flow_name,
                     documentation):
    """
    Connect this task spec to the indicated child.
    :param sequence_flow_id: The ID of the connecting sequenceFlow node.
    :param sequence_flow_name: The name of the connecting sequenceFlow
        node.
    """
    self.connect(taskspec)
    flow = SequenceFlow(sequence_flow_id, sequence_flow_name,
                        documentation, taskspec)
    # Index the new flow both by target task name and by its own ID.
    self.outgoing_sequence_flows[taskspec.name] = flow
    self.outgoing_sequence_flows_by_id[sequence_flow_id] = flow
constant[
Connect this task spec to the indicated child.
:param sequence_flow_id: The ID of the connecting sequenceFlow node.
:param sequence_flow_name: The name of the connecting sequenceFlow
node.
]
call[name[self].connect, parameter[name[taskspec]]]
variable[s] assign[=] call[name[SequenceFlow], parameter[name[sequence_flow_id], name[sequence_flow_name], name[documentation], name[taskspec]]]
call[name[self].outgoing_sequence_flows][name[taskspec].name] assign[=] name[s]
call[name[self].outgoing_sequence_flows_by_id][name[sequence_flow_id]] assign[=] name[s] | keyword[def] identifier[connect_outgoing] ( identifier[self] , identifier[taskspec] , identifier[sequence_flow_id] , identifier[sequence_flow_name] ,
identifier[documentation] ):
literal[string]
identifier[self] . identifier[connect] ( identifier[taskspec] )
identifier[s] = identifier[SequenceFlow] (
identifier[sequence_flow_id] , identifier[sequence_flow_name] , identifier[documentation] , identifier[taskspec] )
identifier[self] . identifier[outgoing_sequence_flows] [ identifier[taskspec] . identifier[name] ]= identifier[s]
identifier[self] . identifier[outgoing_sequence_flows_by_id] [ identifier[sequence_flow_id] ]= identifier[s] | def connect_outgoing(self, taskspec, sequence_flow_id, sequence_flow_name, documentation):
"""
Connect this task spec to the indicated child.
:param sequence_flow_id: The ID of the connecting sequenceFlow node.
:param sequence_flow_name: The name of the connecting sequenceFlow
node.
"""
self.connect(taskspec)
s = SequenceFlow(sequence_flow_id, sequence_flow_name, documentation, taskspec)
self.outgoing_sequence_flows[taskspec.name] = s
self.outgoing_sequence_flows_by_id[sequence_flow_id] = s |
def copy(self):
    """Return an independent duplicate of this cursor (position, bounds, deltas)."""
    duplicate = self.__class__(self.x, self.y)
    duplicate.set_bounds(self.xmin, self.ymin,
                         self.xmax, self.ymax, self.ymaxmax)
    duplicate.set_deltas(self.dx, self.dy)
    return duplicate
constant[ Create a copy, and return it.]
variable[new_cursor] assign[=] call[name[self].__class__, parameter[name[self].x, name[self].y]]
call[name[new_cursor].set_bounds, parameter[name[self].xmin, name[self].ymin, name[self].xmax, name[self].ymax, name[self].ymaxmax]]
call[name[new_cursor].set_deltas, parameter[name[self].dx, name[self].dy]]
return[name[new_cursor]] | keyword[def] identifier[copy] ( identifier[self] ):
literal[string]
identifier[new_cursor] = identifier[self] . identifier[__class__] ( identifier[self] . identifier[x] , identifier[self] . identifier[y] )
identifier[new_cursor] . identifier[set_bounds] ( identifier[self] . identifier[xmin] , identifier[self] . identifier[ymin] , identifier[self] . identifier[xmax] , identifier[self] . identifier[ymax] , identifier[self] . identifier[ymaxmax] )
identifier[new_cursor] . identifier[set_deltas] ( identifier[self] . identifier[dx] , identifier[self] . identifier[dy] )
keyword[return] identifier[new_cursor] | def copy(self):
""" Create a copy, and return it."""
new_cursor = self.__class__(self.x, self.y)
new_cursor.set_bounds(self.xmin, self.ymin, self.xmax, self.ymax, self.ymaxmax)
new_cursor.set_deltas(self.dx, self.dy)
return new_cursor |
def update(self, new_lease_time, client=None):
    """Update the duration of a task lease
    :type new_lease_time: int
    :param new_lease_time: the new lease time in seconds.
    :type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType``
    :param client: Optional. The client to use. If not passed, falls back
                   to the ``client`` stored on the task's taskqueue.
    :rtype: :class:`Task`
    :returns: The task that was just updated.
    :raises: :class:`gcloud.exceptions.NotFound`
             (propagated from
             :meth:`gcloud.taskqueue.taskqueue.Taskqueue.update_task`).
    """
    # Delegate to the owning taskqueue, which performs the API call.
    queue = self.taskqueue
    return queue.update_task(self.id,
                             new_lease_time=new_lease_time,
                             client=client)
constant[Update the duration of a task lease
:type new_lease_time: int
:param new_lease_time: the new lease time in seconds.
:type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the task's taskqueue.
:rtype: :class:`Task`
:returns: The task that was just updated.
:raises: :class:`gcloud.exceptions.NotFound`
(propagated from
:meth:`gcloud.taskqueue.taskqueue.Taskqueue.update_task`).
]
return[call[name[self].taskqueue.update_task, parameter[name[self].id]]] | keyword[def] identifier[update] ( identifier[self] , identifier[new_lease_time] , identifier[client] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[taskqueue] . identifier[update_task] ( identifier[self] . identifier[id] , identifier[new_lease_time] = identifier[new_lease_time] , identifier[client] = identifier[client] ) | def update(self, new_lease_time, client=None):
"""Update the duration of a task lease
:type new_lease_time: int
:param new_lease_time: the new lease time in seconds.
:type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the task's taskqueue.
:rtype: :class:`Task`
:returns: The task that was just updated.
:raises: :class:`gcloud.exceptions.NotFound`
(propagated from
:meth:`gcloud.taskqueue.taskqueue.Taskqueue.update_task`).
"""
return self.taskqueue.update_task(self.id, new_lease_time=new_lease_time, client=client) |
def jobStartNext(self):
    """ For use only by Nupic Scheduler (also known as ClientJobManager) Look
    through the jobs table and see if any new job requests have been
    queued up. If so, pick one and mark it as starting up and create the
    model table to hold the results
    Parameters:
    ----------------------------------------------------------------
    retval:      jobID of the job we are starting up, if found; None if not found
    """
    # NOTE: cursor.execute('SELECT @update_id') trick is unreliable: if a
    #  connection loss occurs during cursor.execute, then the server-cached
    #  information is lost, and we cannot get the updated job ID; so, we use
    #  this select instead
    match = self._getOneMatchingRowWithRetries(
        self._jobs, dict(status=self.STATUS_NOTSTARTED), ['job_id'])
    if match is None:
        return None
    jobID = match[0]
    self._startJobWithRetries(jobID)
    return jobID
constant[ For use only by Nupic Scheduler (also known as ClientJobManager) Look
through the jobs table and see if any new job requests have been
queued up. If so, pick one and mark it as starting up and create the
model table to hold the results
Parameters:
----------------------------------------------------------------
retval: jobID of the job we are starting up, if found; None if not found
]
variable[row] assign[=] call[name[self]._getOneMatchingRowWithRetries, parameter[name[self]._jobs, call[name[dict], parameter[]], list[[<ast.Constant object at 0x7da20c6c43d0>]]]]
if compare[name[row] is constant[None]] begin[:]
return[constant[None]]
<ast.Tuple object at 0x7da20c6c58a0> assign[=] name[row]
call[name[self]._startJobWithRetries, parameter[name[jobID]]]
return[name[jobID]] | keyword[def] identifier[jobStartNext] ( identifier[self] ):
literal[string]
identifier[row] = identifier[self] . identifier[_getOneMatchingRowWithRetries] (
identifier[self] . identifier[_jobs] , identifier[dict] ( identifier[status] = identifier[self] . identifier[STATUS_NOTSTARTED] ),[ literal[string] ])
keyword[if] identifier[row] keyword[is] keyword[None] :
keyword[return] keyword[None]
( identifier[jobID] ,)= identifier[row]
identifier[self] . identifier[_startJobWithRetries] ( identifier[jobID] )
keyword[return] identifier[jobID] | def jobStartNext(self):
""" For use only by Nupic Scheduler (also known as ClientJobManager) Look
through the jobs table and see if any new job requests have been
queued up. If so, pick one and mark it as starting up and create the
model table to hold the results
Parameters:
----------------------------------------------------------------
retval: jobID of the job we are starting up, if found; None if not found
"""
# NOTE: cursor.execute('SELECT @update_id') trick is unreliable: if a
# connection loss occurs during cursor.execute, then the server-cached
# information is lost, and we cannot get the updated job ID; so, we use
# this select instead
row = self._getOneMatchingRowWithRetries(self._jobs, dict(status=self.STATUS_NOTSTARTED), ['job_id'])
if row is None:
return None # depends on [control=['if'], data=[]]
(jobID,) = row
self._startJobWithRetries(jobID)
return jobID |
def absl_to_cpp(level):
    """Converts an absl log level to a cpp log level.
    Args:
      level: int, an absl.logging level.
    Raises:
      TypeError: Raised when level is not an integer.
    Returns:
      The corresponding integer level for use in Abseil C++.
    """
    if not isinstance(level, int):
        raise TypeError('Expect an int level, found {}'.format(type(level)))
    # C++ log levels must be >= 0, so non-negative absl levels collapse to 0
    # while negative (debug) absl levels map to their magnitude.
    return max(0, -level)
constant[Converts an absl log level to a cpp log level.
Args:
level: int, an absl.logging level.
Raises:
TypeError: Raised when level is not an integer.
Returns:
The corresponding integer level for use in Abseil C++.
]
if <ast.UnaryOp object at 0x7da1b188c5e0> begin[:]
<ast.Raise object at 0x7da1b188c2b0>
if compare[name[level] greater_or_equal[>=] constant[0]] begin[:]
return[constant[0]] | keyword[def] identifier[absl_to_cpp] ( identifier[level] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[level] , identifier[int] ):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[level] )))
keyword[if] identifier[level] >= literal[int] :
keyword[return] literal[int]
keyword[else] :
keyword[return] - identifier[level] | def absl_to_cpp(level):
"""Converts an absl log level to a cpp log level.
Args:
level: int, an absl.logging level.
Raises:
TypeError: Raised when level is not an integer.
Returns:
The corresponding integer level for use in Abseil C++.
"""
if not isinstance(level, int):
raise TypeError('Expect an int level, found {}'.format(type(level))) # depends on [control=['if'], data=[]]
if level >= 0:
# C++ log levels must be >= 0
return 0 # depends on [control=['if'], data=[]]
else:
return -level |
def _get_sa_at_1180(self, C, imt, sites, rup, dists):
"""
Compute and return mean imt value for rock conditions
(vs30 = 1100 m/s)
"""
# reference vs30 = 1180 m/s
vs30_1180 = np.ones_like(sites.vs30) * 1180.
# reference shaking intensity = 0
ref_iml = np.zeros_like(sites.vs30)
# fake Z1.0 - Since negative it will be replaced by the default Z1.0
# for the corresponding region
fake_z1pt0 = np.ones_like(sites.vs30) * -1
return (self._get_basic_term(C, rup, dists) +
self._get_faulting_style_term(C, rup) +
self._get_site_response_term(C, imt, vs30_1180, ref_iml) +
self._get_hanging_wall_term(C, dists, rup) +
self._get_top_of_rupture_depth_term(C, imt, rup) +
self._get_soil_depth_term(C, fake_z1pt0, vs30_1180) +
self._get_regional_term(C, imt, vs30_1180, dists.rrup)
) | def function[_get_sa_at_1180, parameter[self, C, imt, sites, rup, dists]]:
constant[
Compute and return mean imt value for rock conditions
(vs30 = 1100 m/s)
]
variable[vs30_1180] assign[=] binary_operation[call[name[np].ones_like, parameter[name[sites].vs30]] * constant[1180.0]]
variable[ref_iml] assign[=] call[name[np].zeros_like, parameter[name[sites].vs30]]
variable[fake_z1pt0] assign[=] binary_operation[call[name[np].ones_like, parameter[name[sites].vs30]] * <ast.UnaryOp object at 0x7da1b133cf10>]
return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[self]._get_basic_term, parameter[name[C], name[rup], name[dists]]] + call[name[self]._get_faulting_style_term, parameter[name[C], name[rup]]]] + call[name[self]._get_site_response_term, parameter[name[C], name[imt], name[vs30_1180], name[ref_iml]]]] + call[name[self]._get_hanging_wall_term, parameter[name[C], name[dists], name[rup]]]] + call[name[self]._get_top_of_rupture_depth_term, parameter[name[C], name[imt], name[rup]]]] + call[name[self]._get_soil_depth_term, parameter[name[C], name[fake_z1pt0], name[vs30_1180]]]] + call[name[self]._get_regional_term, parameter[name[C], name[imt], name[vs30_1180], name[dists].rrup]]]] | keyword[def] identifier[_get_sa_at_1180] ( identifier[self] , identifier[C] , identifier[imt] , identifier[sites] , identifier[rup] , identifier[dists] ):
literal[string]
identifier[vs30_1180] = identifier[np] . identifier[ones_like] ( identifier[sites] . identifier[vs30] )* literal[int]
identifier[ref_iml] = identifier[np] . identifier[zeros_like] ( identifier[sites] . identifier[vs30] )
identifier[fake_z1pt0] = identifier[np] . identifier[ones_like] ( identifier[sites] . identifier[vs30] )*- literal[int]
keyword[return] ( identifier[self] . identifier[_get_basic_term] ( identifier[C] , identifier[rup] , identifier[dists] )+
identifier[self] . identifier[_get_faulting_style_term] ( identifier[C] , identifier[rup] )+
identifier[self] . identifier[_get_site_response_term] ( identifier[C] , identifier[imt] , identifier[vs30_1180] , identifier[ref_iml] )+
identifier[self] . identifier[_get_hanging_wall_term] ( identifier[C] , identifier[dists] , identifier[rup] )+
identifier[self] . identifier[_get_top_of_rupture_depth_term] ( identifier[C] , identifier[imt] , identifier[rup] )+
identifier[self] . identifier[_get_soil_depth_term] ( identifier[C] , identifier[fake_z1pt0] , identifier[vs30_1180] )+
identifier[self] . identifier[_get_regional_term] ( identifier[C] , identifier[imt] , identifier[vs30_1180] , identifier[dists] . identifier[rrup] )
) | def _get_sa_at_1180(self, C, imt, sites, rup, dists):
"""
Compute and return mean imt value for rock conditions
(vs30 = 1100 m/s)
"""
# reference vs30 = 1180 m/s
vs30_1180 = np.ones_like(sites.vs30) * 1180.0
# reference shaking intensity = 0
ref_iml = np.zeros_like(sites.vs30)
# fake Z1.0 - Since negative it will be replaced by the default Z1.0
# for the corresponding region
fake_z1pt0 = np.ones_like(sites.vs30) * -1
return self._get_basic_term(C, rup, dists) + self._get_faulting_style_term(C, rup) + self._get_site_response_term(C, imt, vs30_1180, ref_iml) + self._get_hanging_wall_term(C, dists, rup) + self._get_top_of_rupture_depth_term(C, imt, rup) + self._get_soil_depth_term(C, fake_z1pt0, vs30_1180) + self._get_regional_term(C, imt, vs30_1180, dists.rrup) |
def recv(self, filename, dest_file, timeout=None):
    """Retrieve a file from the device into the file-like dest_file."""
    sync = DataFilesyncTransport(self.stream)
    sync.write_data('RECV', filename, timeout)
    # Stream the file contents chunk by chunk until the device signals DONE.
    for chunk in sync.read_until_done('DATA', timeout):
        dest_file.write(chunk.data)
constant[Retrieve a file from the device into the file-like dest_file.]
variable[transport] assign[=] call[name[DataFilesyncTransport], parameter[name[self].stream]]
call[name[transport].write_data, parameter[constant[RECV], name[filename], name[timeout]]]
for taget[name[data_msg]] in starred[call[name[transport].read_until_done, parameter[constant[DATA], name[timeout]]]] begin[:]
call[name[dest_file].write, parameter[name[data_msg].data]] | keyword[def] identifier[recv] ( identifier[self] , identifier[filename] , identifier[dest_file] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[transport] = identifier[DataFilesyncTransport] ( identifier[self] . identifier[stream] )
identifier[transport] . identifier[write_data] ( literal[string] , identifier[filename] , identifier[timeout] )
keyword[for] identifier[data_msg] keyword[in] identifier[transport] . identifier[read_until_done] ( literal[string] , identifier[timeout] ):
identifier[dest_file] . identifier[write] ( identifier[data_msg] . identifier[data] ) | def recv(self, filename, dest_file, timeout=None):
"""Retrieve a file from the device into the file-like dest_file."""
transport = DataFilesyncTransport(self.stream)
transport.write_data('RECV', filename, timeout)
for data_msg in transport.read_until_done('DATA', timeout):
dest_file.write(data_msg.data) # depends on [control=['for'], data=['data_msg']] |
def _candlestick_ax(df, ax):
"""
# Alternatively: (but hard to get dates set up properly)
plt.xticks(range(len(df.index)), df.index, rotation=45)
fplt.candlestick2_ohlc(ax, df.loc[:, 'Open'].values, df.loc[:, 'High'].values,
df.loc[:, 'Low'].values, df.loc[:, 'Close'].values, width=0.2)
"""
quotes = df.reset_index()
quotes.loc[:, 'Date'] = mdates.date2num(quotes.loc[:, 'Date'].astype(dt.date))
fplt.candlestick_ohlc(ax, quotes.values) | def function[_candlestick_ax, parameter[df, ax]]:
constant[
# Alternatively: (but hard to get dates set up properly)
plt.xticks(range(len(df.index)), df.index, rotation=45)
fplt.candlestick2_ohlc(ax, df.loc[:, 'Open'].values, df.loc[:, 'High'].values,
df.loc[:, 'Low'].values, df.loc[:, 'Close'].values, width=0.2)
]
variable[quotes] assign[=] call[name[df].reset_index, parameter[]]
call[name[quotes].loc][tuple[[<ast.Slice object at 0x7da1b2765f60>, <ast.Constant object at 0x7da1b2767610>]]] assign[=] call[name[mdates].date2num, parameter[call[call[name[quotes].loc][tuple[[<ast.Slice object at 0x7da20c7c8790>, <ast.Constant object at 0x7da20c7cbaf0>]]].astype, parameter[name[dt].date]]]]
call[name[fplt].candlestick_ohlc, parameter[name[ax], name[quotes].values]] | keyword[def] identifier[_candlestick_ax] ( identifier[df] , identifier[ax] ):
literal[string]
identifier[quotes] = identifier[df] . identifier[reset_index] ()
identifier[quotes] . identifier[loc] [:, literal[string] ]= identifier[mdates] . identifier[date2num] ( identifier[quotes] . identifier[loc] [:, literal[string] ]. identifier[astype] ( identifier[dt] . identifier[date] ))
identifier[fplt] . identifier[candlestick_ohlc] ( identifier[ax] , identifier[quotes] . identifier[values] ) | def _candlestick_ax(df, ax):
"""
# Alternatively: (but hard to get dates set up properly)
plt.xticks(range(len(df.index)), df.index, rotation=45)
fplt.candlestick2_ohlc(ax, df.loc[:, 'Open'].values, df.loc[:, 'High'].values,
df.loc[:, 'Low'].values, df.loc[:, 'Close'].values, width=0.2)
"""
quotes = df.reset_index()
quotes.loc[:, 'Date'] = mdates.date2num(quotes.loc[:, 'Date'].astype(dt.date))
fplt.candlestick_ohlc(ax, quotes.values) |
def _find_set_members(set):
'''
Return list of members for a set
'''
cmd = '{0} list {1}'.format(_ipset_cmd(), set)
out = __salt__['cmd.run_all'](cmd, python_shell=False)
if out['retcode'] > 0:
# Set doesn't exist return false
return False
_tmp = out['stdout'].split('\n')
members = []
startMembers = False
for i in _tmp:
if startMembers:
members.append(i)
if 'Members:' in i:
startMembers = True
return members | def function[_find_set_members, parameter[set]]:
constant[
Return list of members for a set
]
variable[cmd] assign[=] call[constant[{0} list {1}].format, parameter[call[name[_ipset_cmd], parameter[]], name[set]]]
variable[out] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]
if compare[call[name[out]][constant[retcode]] greater[>] constant[0]] begin[:]
return[constant[False]]
variable[_tmp] assign[=] call[call[name[out]][constant[stdout]].split, parameter[constant[
]]]
variable[members] assign[=] list[[]]
variable[startMembers] assign[=] constant[False]
for taget[name[i]] in starred[name[_tmp]] begin[:]
if name[startMembers] begin[:]
call[name[members].append, parameter[name[i]]]
if compare[constant[Members:] in name[i]] begin[:]
variable[startMembers] assign[=] constant[True]
return[name[members]] | keyword[def] identifier[_find_set_members] ( identifier[set] ):
literal[string]
identifier[cmd] = literal[string] . identifier[format] ( identifier[_ipset_cmd] (), identifier[set] )
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[python_shell] = keyword[False] )
keyword[if] identifier[out] [ literal[string] ]> literal[int] :
keyword[return] keyword[False]
identifier[_tmp] = identifier[out] [ literal[string] ]. identifier[split] ( literal[string] )
identifier[members] =[]
identifier[startMembers] = keyword[False]
keyword[for] identifier[i] keyword[in] identifier[_tmp] :
keyword[if] identifier[startMembers] :
identifier[members] . identifier[append] ( identifier[i] )
keyword[if] literal[string] keyword[in] identifier[i] :
identifier[startMembers] = keyword[True]
keyword[return] identifier[members] | def _find_set_members(set):
"""
Return list of members for a set
"""
cmd = '{0} list {1}'.format(_ipset_cmd(), set)
out = __salt__['cmd.run_all'](cmd, python_shell=False)
if out['retcode'] > 0:
# Set doesn't exist return false
return False # depends on [control=['if'], data=[]]
_tmp = out['stdout'].split('\n')
members = []
startMembers = False
for i in _tmp:
if startMembers:
members.append(i) # depends on [control=['if'], data=[]]
if 'Members:' in i:
startMembers = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return members |
def TempFilePath(suffix = "", prefix = "tmp",
dir = None): # pylint: disable=redefined-builtin
"""Creates a temporary file based on the environment configuration.
If no directory is specified the file will be placed in folder as specified by
the `TEST_TMPDIR` environment variable if available or fallback to
`Test.tmpdir` of the current configuration if not.
If directory is specified it must be part of the default test temporary
directory.
Args:
suffix: A suffix to end the file name with.
prefix: A prefix to begin the file name with.
dir: A directory to place the file in.
Returns:
An absolute path to the created file.
Raises:
ValueError: If the specified directory is not part of the default test
temporary directory.
"""
precondition.AssertType(suffix, Text)
precondition.AssertType(prefix, Text)
precondition.AssertOptionalType(dir, Text)
root = _TempRootPath()
if not dir:
dir = root
elif root and not os.path.commonprefix([dir, root]):
raise ValueError("path '%s' must start with '%s'" % (dir, root))
# `mkstemp` returns an open descriptor for the file. We don't care about it as
# we are only concerned with the path, but we need to close it first or else
# the file will remain open until the garbage collectors steps in.
desc, path = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
os.close(desc)
return path | def function[TempFilePath, parameter[suffix, prefix, dir]]:
constant[Creates a temporary file based on the environment configuration.
If no directory is specified the file will be placed in folder as specified by
the `TEST_TMPDIR` environment variable if available or fallback to
`Test.tmpdir` of the current configuration if not.
If directory is specified it must be part of the default test temporary
directory.
Args:
suffix: A suffix to end the file name with.
prefix: A prefix to begin the file name with.
dir: A directory to place the file in.
Returns:
An absolute path to the created file.
Raises:
ValueError: If the specified directory is not part of the default test
temporary directory.
]
call[name[precondition].AssertType, parameter[name[suffix], name[Text]]]
call[name[precondition].AssertType, parameter[name[prefix], name[Text]]]
call[name[precondition].AssertOptionalType, parameter[name[dir], name[Text]]]
variable[root] assign[=] call[name[_TempRootPath], parameter[]]
if <ast.UnaryOp object at 0x7da1b1c3e410> begin[:]
variable[dir] assign[=] name[root]
<ast.Tuple object at 0x7da1b1c3c8e0> assign[=] call[name[tempfile].mkstemp, parameter[]]
call[name[os].close, parameter[name[desc]]]
return[name[path]] | keyword[def] identifier[TempFilePath] ( identifier[suffix] = literal[string] , identifier[prefix] = literal[string] ,
identifier[dir] = keyword[None] ):
literal[string]
identifier[precondition] . identifier[AssertType] ( identifier[suffix] , identifier[Text] )
identifier[precondition] . identifier[AssertType] ( identifier[prefix] , identifier[Text] )
identifier[precondition] . identifier[AssertOptionalType] ( identifier[dir] , identifier[Text] )
identifier[root] = identifier[_TempRootPath] ()
keyword[if] keyword[not] identifier[dir] :
identifier[dir] = identifier[root]
keyword[elif] identifier[root] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[commonprefix] ([ identifier[dir] , identifier[root] ]):
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[dir] , identifier[root] ))
identifier[desc] , identifier[path] = identifier[tempfile] . identifier[mkstemp] ( identifier[suffix] = identifier[suffix] , identifier[prefix] = identifier[prefix] , identifier[dir] = identifier[dir] )
identifier[os] . identifier[close] ( identifier[desc] )
keyword[return] identifier[path] | def TempFilePath(suffix='', prefix='tmp', dir=None): # pylint: disable=redefined-builtin
'Creates a temporary file based on the environment configuration.\n\n If no directory is specified the file will be placed in folder as specified by\n the `TEST_TMPDIR` environment variable if available or fallback to\n `Test.tmpdir` of the current configuration if not.\n\n If directory is specified it must be part of the default test temporary\n directory.\n\n Args:\n suffix: A suffix to end the file name with.\n prefix: A prefix to begin the file name with.\n dir: A directory to place the file in.\n\n Returns:\n An absolute path to the created file.\n\n Raises:\n ValueError: If the specified directory is not part of the default test\n temporary directory.\n '
precondition.AssertType(suffix, Text)
precondition.AssertType(prefix, Text)
precondition.AssertOptionalType(dir, Text)
root = _TempRootPath()
if not dir:
dir = root # depends on [control=['if'], data=[]]
elif root and (not os.path.commonprefix([dir, root])):
raise ValueError("path '%s' must start with '%s'" % (dir, root)) # depends on [control=['if'], data=[]]
# `mkstemp` returns an open descriptor for the file. We don't care about it as
# we are only concerned with the path, but we need to close it first or else
# the file will remain open until the garbage collectors steps in.
(desc, path) = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
os.close(desc)
return path |
def try_number(self):
"""
Return the try number that this task number will be when it is actually
run.
If the TI is currently running, this will match the column in the
databse, in all othercases this will be incremenetd
"""
# This is designed so that task logs end up in the right file.
if self.state == State.RUNNING:
return self._try_number
return self._try_number + 1 | def function[try_number, parameter[self]]:
constant[
Return the try number that this task number will be when it is actually
run.
If the TI is currently running, this will match the column in the
databse, in all othercases this will be incremenetd
]
if compare[name[self].state equal[==] name[State].RUNNING] begin[:]
return[name[self]._try_number]
return[binary_operation[name[self]._try_number + constant[1]]] | keyword[def] identifier[try_number] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[state] == identifier[State] . identifier[RUNNING] :
keyword[return] identifier[self] . identifier[_try_number]
keyword[return] identifier[self] . identifier[_try_number] + literal[int] | def try_number(self):
"""
Return the try number that this task number will be when it is actually
run.
If the TI is currently running, this will match the column in the
databse, in all othercases this will be incremenetd
"""
# This is designed so that task logs end up in the right file.
if self.state == State.RUNNING:
return self._try_number # depends on [control=['if'], data=[]]
return self._try_number + 1 |
def parse_authorization_header(value):
"""Parse an HTTP basic/digest authorization header transmitted by the web
browser. The return value is either `None` if the header was invalid or
not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
object.
:param value: the authorization header to parse.
:return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
"""
if not value:
return
value = wsgi_to_bytes(value)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except ValueError:
return
if auth_type == b"basic":
try:
username, password = base64.b64decode(auth_info).split(b":", 1)
except Exception:
return
return Authorization(
"basic",
{
"username": to_unicode(username, _basic_auth_charset),
"password": to_unicode(password, _basic_auth_charset),
},
)
elif auth_type == b"digest":
auth_map = parse_dict_header(auth_info)
for key in "username", "realm", "nonce", "uri", "response":
if key not in auth_map:
return
if "qop" in auth_map:
if not auth_map.get("nc") or not auth_map.get("cnonce"):
return
return Authorization("digest", auth_map) | def function[parse_authorization_header, parameter[value]]:
constant[Parse an HTTP basic/digest authorization header transmitted by the web
browser. The return value is either `None` if the header was invalid or
not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
object.
:param value: the authorization header to parse.
:return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
]
if <ast.UnaryOp object at 0x7da20e9b00d0> begin[:]
return[None]
variable[value] assign[=] call[name[wsgi_to_bytes], parameter[name[value]]]
<ast.Try object at 0x7da20e9b0520>
if compare[name[auth_type] equal[==] constant[b'basic']] begin[:]
<ast.Try object at 0x7da20e9b38e0>
return[call[name[Authorization], parameter[constant[basic], dictionary[[<ast.Constant object at 0x7da18bcca380>, <ast.Constant object at 0x7da18bccbfd0>], [<ast.Call object at 0x7da18bcc96f0>, <ast.Call object at 0x7da18bcca6b0>]]]]] | keyword[def] identifier[parse_authorization_header] ( identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[value] :
keyword[return]
identifier[value] = identifier[wsgi_to_bytes] ( identifier[value] )
keyword[try] :
identifier[auth_type] , identifier[auth_info] = identifier[value] . identifier[split] ( keyword[None] , literal[int] )
identifier[auth_type] = identifier[auth_type] . identifier[lower] ()
keyword[except] identifier[ValueError] :
keyword[return]
keyword[if] identifier[auth_type] == literal[string] :
keyword[try] :
identifier[username] , identifier[password] = identifier[base64] . identifier[b64decode] ( identifier[auth_info] ). identifier[split] ( literal[string] , literal[int] )
keyword[except] identifier[Exception] :
keyword[return]
keyword[return] identifier[Authorization] (
literal[string] ,
{
literal[string] : identifier[to_unicode] ( identifier[username] , identifier[_basic_auth_charset] ),
literal[string] : identifier[to_unicode] ( identifier[password] , identifier[_basic_auth_charset] ),
},
)
keyword[elif] identifier[auth_type] == literal[string] :
identifier[auth_map] = identifier[parse_dict_header] ( identifier[auth_info] )
keyword[for] identifier[key] keyword[in] literal[string] , literal[string] , literal[string] , literal[string] , literal[string] :
keyword[if] identifier[key] keyword[not] keyword[in] identifier[auth_map] :
keyword[return]
keyword[if] literal[string] keyword[in] identifier[auth_map] :
keyword[if] keyword[not] identifier[auth_map] . identifier[get] ( literal[string] ) keyword[or] keyword[not] identifier[auth_map] . identifier[get] ( literal[string] ):
keyword[return]
keyword[return] identifier[Authorization] ( literal[string] , identifier[auth_map] ) | def parse_authorization_header(value):
"""Parse an HTTP basic/digest authorization header transmitted by the web
browser. The return value is either `None` if the header was invalid or
not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
object.
:param value: the authorization header to parse.
:return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
"""
if not value:
return # depends on [control=['if'], data=[]]
value = wsgi_to_bytes(value)
try:
(auth_type, auth_info) = value.split(None, 1)
auth_type = auth_type.lower() # depends on [control=['try'], data=[]]
except ValueError:
return # depends on [control=['except'], data=[]]
if auth_type == b'basic':
try:
(username, password) = base64.b64decode(auth_info).split(b':', 1) # depends on [control=['try'], data=[]]
except Exception:
return # depends on [control=['except'], data=[]]
return Authorization('basic', {'username': to_unicode(username, _basic_auth_charset), 'password': to_unicode(password, _basic_auth_charset)}) # depends on [control=['if'], data=[]]
elif auth_type == b'digest':
auth_map = parse_dict_header(auth_info)
for key in ('username', 'realm', 'nonce', 'uri', 'response'):
if key not in auth_map:
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
if 'qop' in auth_map:
if not auth_map.get('nc') or not auth_map.get('cnonce'):
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['auth_map']]
return Authorization('digest', auth_map) # depends on [control=['if'], data=[]] |
def send_command(self, command):
"""Send a command for FastAGI request:
:param command: Command to launch on FastAGI request. Ex: 'EXEC StartMusicOnHolds'
:type command: String
:Example:
::
@asyncio.coroutine
def call_waiting(request):
print(['AGI variables:', request.headers])
yield from request.send_command('ANSWER')
yield from request.send_command('EXEC StartMusicOnHold')
yield from request.send_command('EXEC Wait 10')
"""
command += '\n'
self.writer.write(command.encode(self.encoding))
yield from self.writer.drain()
agi_result = yield from self._read_result()
# If Asterisk returns `100 Trying...`, wait for next the response.
while agi_result.get('status_code') == 100:
agi_result = yield from self._read_result()
# when we got AGIUsageError the following line contains some indication
if 'error' in agi_result and agi_result['error'] == 'AGIUsageError':
buff_usage_error = yield from self.reader.readline()
agi_result['msg'] += buff_usage_error.decode(self.encoding)
return agi_result | def function[send_command, parameter[self, command]]:
constant[Send a command for FastAGI request:
:param command: Command to launch on FastAGI request. Ex: 'EXEC StartMusicOnHolds'
:type command: String
:Example:
::
@asyncio.coroutine
def call_waiting(request):
print(['AGI variables:', request.headers])
yield from request.send_command('ANSWER')
yield from request.send_command('EXEC StartMusicOnHold')
yield from request.send_command('EXEC Wait 10')
]
<ast.AugAssign object at 0x7da18c4cdbd0>
call[name[self].writer.write, parameter[call[name[command].encode, parameter[name[self].encoding]]]]
<ast.YieldFrom object at 0x7da18c4cf8b0>
variable[agi_result] assign[=] <ast.YieldFrom object at 0x7da18c4cc0d0>
while compare[call[name[agi_result].get, parameter[constant[status_code]]] equal[==] constant[100]] begin[:]
variable[agi_result] assign[=] <ast.YieldFrom object at 0x7da18c4cfa30>
if <ast.BoolOp object at 0x7da18c4ce920> begin[:]
variable[buff_usage_error] assign[=] <ast.YieldFrom object at 0x7da18c4cf760>
<ast.AugAssign object at 0x7da18c4cfe20>
return[name[agi_result]] | keyword[def] identifier[send_command] ( identifier[self] , identifier[command] ):
literal[string]
identifier[command] += literal[string]
identifier[self] . identifier[writer] . identifier[write] ( identifier[command] . identifier[encode] ( identifier[self] . identifier[encoding] ))
keyword[yield] keyword[from] identifier[self] . identifier[writer] . identifier[drain] ()
identifier[agi_result] = keyword[yield] keyword[from] identifier[self] . identifier[_read_result] ()
keyword[while] identifier[agi_result] . identifier[get] ( literal[string] )== literal[int] :
identifier[agi_result] = keyword[yield] keyword[from] identifier[self] . identifier[_read_result] ()
keyword[if] literal[string] keyword[in] identifier[agi_result] keyword[and] identifier[agi_result] [ literal[string] ]== literal[string] :
identifier[buff_usage_error] = keyword[yield] keyword[from] identifier[self] . identifier[reader] . identifier[readline] ()
identifier[agi_result] [ literal[string] ]+= identifier[buff_usage_error] . identifier[decode] ( identifier[self] . identifier[encoding] )
keyword[return] identifier[agi_result] | def send_command(self, command):
"""Send a command for FastAGI request:
:param command: Command to launch on FastAGI request. Ex: 'EXEC StartMusicOnHolds'
:type command: String
:Example:
::
@asyncio.coroutine
def call_waiting(request):
print(['AGI variables:', request.headers])
yield from request.send_command('ANSWER')
yield from request.send_command('EXEC StartMusicOnHold')
yield from request.send_command('EXEC Wait 10')
"""
command += '\n'
self.writer.write(command.encode(self.encoding))
yield from self.writer.drain()
agi_result = (yield from self._read_result())
# If Asterisk returns `100 Trying...`, wait for next the response.
while agi_result.get('status_code') == 100:
agi_result = (yield from self._read_result()) # depends on [control=['while'], data=[]]
# when we got AGIUsageError the following line contains some indication
if 'error' in agi_result and agi_result['error'] == 'AGIUsageError':
buff_usage_error = (yield from self.reader.readline())
agi_result['msg'] += buff_usage_error.decode(self.encoding) # depends on [control=['if'], data=[]]
return agi_result |
def enumerate(self, **kwargs):
'''Iterate through all possible sequences (lists). By default, will
stop after 50 items have been yielded. This value can be
change by supplying a different value via the max_enumerate kwarg.
'''
for item in self.set.enumerate(**kwargs):
yield flattened(item) | def function[enumerate, parameter[self]]:
constant[Iterate through all possible sequences (lists). By default, will
stop after 50 items have been yielded. This value can be
change by supplying a different value via the max_enumerate kwarg.
]
for taget[name[item]] in starred[call[name[self].set.enumerate, parameter[]]] begin[:]
<ast.Yield object at 0x7da2049638e0> | keyword[def] identifier[enumerate] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[set] . identifier[enumerate] (** identifier[kwargs] ):
keyword[yield] identifier[flattened] ( identifier[item] ) | def enumerate(self, **kwargs):
"""Iterate through all possible sequences (lists). By default, will
stop after 50 items have been yielded. This value can be
change by supplying a different value via the max_enumerate kwarg.
"""
for item in self.set.enumerate(**kwargs):
yield flattened(item) # depends on [control=['for'], data=['item']] |
def stop(self):
"""
Stop the running task
"""
if self._thread is not None and self._thread.isAlive():
self._done.set() | def function[stop, parameter[self]]:
constant[
Stop the running task
]
if <ast.BoolOp object at 0x7da1b1a95930> begin[:]
call[name[self]._done.set, parameter[]] | keyword[def] identifier[stop] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_thread] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[_thread] . identifier[isAlive] ():
identifier[self] . identifier[_done] . identifier[set] () | def stop(self):
"""
Stop the running task
"""
if self._thread is not None and self._thread.isAlive():
self._done.set() # depends on [control=['if'], data=[]] |
def parse(self):
"""Parse this Berksfile into a dict."""
self.flush()
self.seek(0)
data = utils.ruby_lines(self.readlines())
data = [tuple(j.strip() for j in line.split(None, 1))
for line in data]
datamap = {}
for line in data:
if len(line) == 1:
datamap[line[0]] = True
elif len(line) == 2:
key, value = line
if key == 'cookbook':
datamap.setdefault('cookbook', {})
value = [utils.ruby_strip(v) for v in value.split(',')]
lib, detail = value[0], value[1:]
datamap['cookbook'].setdefault(lib, {})
# if there is additional dependency data but its
# not the ruby hash, its the version constraint
if detail and not any("".join(detail).startswith(o)
for o in self.berks_options):
constraint, detail = detail[0], detail[1:]
datamap['cookbook'][lib]['constraint'] = constraint
if detail:
for deet in detail:
opt, val = [
utils.ruby_strip(i)
for i in deet.split(':', 1)
]
if not any(opt == o for o in self.berks_options):
raise ValueError(
"Cookbook detail '%s' does not specify "
"one of '%s'" % (opt, self.berks_options))
else:
datamap['cookbook'][lib][opt.strip(':')] = (
utils.ruby_strip(val))
elif key == 'source':
datamap.setdefault(key, [])
datamap[key].append(utils.ruby_strip(value))
elif key:
datamap[key] = utils.ruby_strip(value)
self.seek(0)
return datamap | def function[parse, parameter[self]]:
constant[Parse this Berksfile into a dict.]
call[name[self].flush, parameter[]]
call[name[self].seek, parameter[constant[0]]]
variable[data] assign[=] call[name[utils].ruby_lines, parameter[call[name[self].readlines, parameter[]]]]
variable[data] assign[=] <ast.ListComp object at 0x7da20e74ba30>
variable[datamap] assign[=] dictionary[[], []]
for taget[name[line]] in starred[name[data]] begin[:]
if compare[call[name[len], parameter[name[line]]] equal[==] constant[1]] begin[:]
call[name[datamap]][call[name[line]][constant[0]]] assign[=] constant[True]
call[name[self].seek, parameter[constant[0]]]
return[name[datamap]] | keyword[def] identifier[parse] ( identifier[self] ):
literal[string]
identifier[self] . identifier[flush] ()
identifier[self] . identifier[seek] ( literal[int] )
identifier[data] = identifier[utils] . identifier[ruby_lines] ( identifier[self] . identifier[readlines] ())
identifier[data] =[ identifier[tuple] ( identifier[j] . identifier[strip] () keyword[for] identifier[j] keyword[in] identifier[line] . identifier[split] ( keyword[None] , literal[int] ))
keyword[for] identifier[line] keyword[in] identifier[data] ]
identifier[datamap] ={}
keyword[for] identifier[line] keyword[in] identifier[data] :
keyword[if] identifier[len] ( identifier[line] )== literal[int] :
identifier[datamap] [ identifier[line] [ literal[int] ]]= keyword[True]
keyword[elif] identifier[len] ( identifier[line] )== literal[int] :
identifier[key] , identifier[value] = identifier[line]
keyword[if] identifier[key] == literal[string] :
identifier[datamap] . identifier[setdefault] ( literal[string] ,{})
identifier[value] =[ identifier[utils] . identifier[ruby_strip] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[value] . identifier[split] ( literal[string] )]
identifier[lib] , identifier[detail] = identifier[value] [ literal[int] ], identifier[value] [ literal[int] :]
identifier[datamap] [ literal[string] ]. identifier[setdefault] ( identifier[lib] ,{})
keyword[if] identifier[detail] keyword[and] keyword[not] identifier[any] ( literal[string] . identifier[join] ( identifier[detail] ). identifier[startswith] ( identifier[o] )
keyword[for] identifier[o] keyword[in] identifier[self] . identifier[berks_options] ):
identifier[constraint] , identifier[detail] = identifier[detail] [ literal[int] ], identifier[detail] [ literal[int] :]
identifier[datamap] [ literal[string] ][ identifier[lib] ][ literal[string] ]= identifier[constraint]
keyword[if] identifier[detail] :
keyword[for] identifier[deet] keyword[in] identifier[detail] :
identifier[opt] , identifier[val] =[
identifier[utils] . identifier[ruby_strip] ( identifier[i] )
keyword[for] identifier[i] keyword[in] identifier[deet] . identifier[split] ( literal[string] , literal[int] )
]
keyword[if] keyword[not] identifier[any] ( identifier[opt] == identifier[o] keyword[for] identifier[o] keyword[in] identifier[self] . identifier[berks_options] ):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] %( identifier[opt] , identifier[self] . identifier[berks_options] ))
keyword[else] :
identifier[datamap] [ literal[string] ][ identifier[lib] ][ identifier[opt] . identifier[strip] ( literal[string] )]=(
identifier[utils] . identifier[ruby_strip] ( identifier[val] ))
keyword[elif] identifier[key] == literal[string] :
identifier[datamap] . identifier[setdefault] ( identifier[key] ,[])
identifier[datamap] [ identifier[key] ]. identifier[append] ( identifier[utils] . identifier[ruby_strip] ( identifier[value] ))
keyword[elif] identifier[key] :
identifier[datamap] [ identifier[key] ]= identifier[utils] . identifier[ruby_strip] ( identifier[value] )
identifier[self] . identifier[seek] ( literal[int] )
keyword[return] identifier[datamap] | def parse(self):
"""Parse this Berksfile into a dict."""
self.flush()
self.seek(0)
data = utils.ruby_lines(self.readlines())
data = [tuple((j.strip() for j in line.split(None, 1))) for line in data]
datamap = {}
for line in data:
if len(line) == 1:
datamap[line[0]] = True # depends on [control=['if'], data=[]]
elif len(line) == 2:
(key, value) = line
if key == 'cookbook':
datamap.setdefault('cookbook', {})
value = [utils.ruby_strip(v) for v in value.split(',')]
(lib, detail) = (value[0], value[1:])
datamap['cookbook'].setdefault(lib, {})
# if there is additional dependency data but its
# not the ruby hash, its the version constraint
if detail and (not any((''.join(detail).startswith(o) for o in self.berks_options))):
(constraint, detail) = (detail[0], detail[1:])
datamap['cookbook'][lib]['constraint'] = constraint # depends on [control=['if'], data=[]]
if detail:
for deet in detail:
(opt, val) = [utils.ruby_strip(i) for i in deet.split(':', 1)]
if not any((opt == o for o in self.berks_options)):
raise ValueError("Cookbook detail '%s' does not specify one of '%s'" % (opt, self.berks_options)) # depends on [control=['if'], data=[]]
else:
datamap['cookbook'][lib][opt.strip(':')] = utils.ruby_strip(val) # depends on [control=['for'], data=['deet']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif key == 'source':
datamap.setdefault(key, [])
datamap[key].append(utils.ruby_strip(value)) # depends on [control=['if'], data=['key']]
elif key:
datamap[key] = utils.ruby_strip(value) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
self.seek(0)
return datamap |
def ConvCnstrMOD(*args, **kwargs):
    """Construct a Convolutional Constrained MOD solver object.

    This factory behaves like a class: it dynamically derives a class
    from one of the implementations of the Convolutional Constrained
    MOD problem and returns an instance of that class built from the
    supplied parameters. The implementation is selected with the extra
    keyword argument ``method``, whose valid values are:

    - ``'ism'`` :
      Use :class:`.ConvCnstrMOD_IterSM`. Works well for a small number
      of training images, but is very slow for larger training sets.
    - ``'cg'`` :
      Use :class:`.ConvCnstrMOD_CG`. Slower than ``'ism'`` for small
      training sets, but has better run time scaling as the training
      set grows.
    - ``'cns'`` :
      Use :class:`.ConvCnstrMOD_Consensus`. A good choice for large
      training sets.
    - ``'fista'`` :
      Use :class:`.fista.ccmod.ConvCnstrMOD`. The best choice for
      large training sets.

    The default value is ``'fista'``.
    """

    # Pop the implementation selector; all remaining arguments are
    # forwarded unchanged to the selected implementation.
    method = kwargs.pop('method', 'fista')
    # Resolve the selector string to the corresponding base class
    base = ccmod_class_label_lookup(method)

    # Dynamically derived class. The class name must remain
    # 'ConvCnstrMOD' so that the pickling fix-up below can resolve it.
    class ConvCnstrMOD(base):
        def __init__(self, *args, **kwargs):
            super(ConvCnstrMOD, self).__init__(*args, **kwargs)

    # Register the dynamic class so objects of this type can be pickled
    _fix_dynamic_class_lookup(ConvCnstrMOD, method)

    # Instantiate and return an object of the nested class type
    return ConvCnstrMOD(*args, **kwargs)
constant[A wrapper function that dynamically defines a class derived from
one of the implementations of the Convolutional Constrained MOD
problems, and returns an object instantiated with the provided
parameters. The wrapper is designed to allow the appropriate
object to be created by calling this function using the same
syntax as would be used if it were a class. The specific
implementation is selected by use of an additional keyword
argument 'method'. Valid values are:
- ``'ism'`` :
Use the implementation defined in :class:`.ConvCnstrMOD_IterSM`. This
method works well for a small number of training images, but is very
slow for larger training sets.
- ``'cg'`` :
Use the implementation defined in :class:`.ConvCnstrMOD_CG`. This
method is slower than ``'ism'`` for small training sets, but has better
run time scaling as the training set grows.
- ``'cns'`` :
Use the implementation defined in :class:`.ConvCnstrMOD_Consensus`.
This method is a good choice for large training sets.
- ``'fista'`` :
Use the implementation defined in :class:`.fista.ccmod.ConvCnstrMOD`.
This method is the best choice for large training sets.
The default value is ``'fista'``.
]
variable[method] assign[=] call[name[kwargs].pop, parameter[constant[method], constant[fista]]]
variable[base] assign[=] call[name[ccmod_class_label_lookup], parameter[name[method]]]
class class[ConvCnstrMOD, parameter[]] begin[:]
def function[__init__, parameter[self]]:
call[call[name[super], parameter[name[ConvCnstrMOD], name[self]]].__init__, parameter[<ast.Starred object at 0x7da1b07f9330>]]
call[name[_fix_dynamic_class_lookup], parameter[name[ConvCnstrMOD], name[method]]]
return[call[name[ConvCnstrMOD], parameter[<ast.Starred object at 0x7da1b07fa080>]]] | keyword[def] identifier[ConvCnstrMOD] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[method] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[base] = identifier[ccmod_class_label_lookup] ( identifier[method] )
keyword[class] identifier[ConvCnstrMOD] ( identifier[base] ):
keyword[def] identifier[__init__] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
identifier[super] ( identifier[ConvCnstrMOD] , identifier[self] ). identifier[__init__] (* identifier[args] ,** identifier[kwargs] )
identifier[_fix_dynamic_class_lookup] ( identifier[ConvCnstrMOD] , identifier[method] )
keyword[return] identifier[ConvCnstrMOD] (* identifier[args] ,** identifier[kwargs] ) | def ConvCnstrMOD(*args, **kwargs):
"""A wrapper function that dynamically defines a class derived from
one of the implementations of the Convolutional Constrained MOD
problems, and returns an object instantiated with the provided
parameters. The wrapper is designed to allow the appropriate
object to be created by calling this function using the same
syntax as would be used if it were a class. The specific
implementation is selected by use of an additional keyword
argument 'method'. Valid values are:
- ``'ism'`` :
Use the implementation defined in :class:`.ConvCnstrMOD_IterSM`. This
method works well for a small number of training images, but is very
slow for larger training sets.
- ``'cg'`` :
Use the implementation defined in :class:`.ConvCnstrMOD_CG`. This
method is slower than ``'ism'`` for small training sets, but has better
run time scaling as the training set grows.
- ``'cns'`` :
Use the implementation defined in :class:`.ConvCnstrMOD_Consensus`.
This method is a good choice for large training sets.
- ``'fista'`` :
Use the implementation defined in :class:`.fista.ccmod.ConvCnstrMOD`.
This method is the best choice for large training sets.
The default value is ``'fista'``.
"""
# Extract method selection argument or set default
method = kwargs.pop('method', 'fista')
# Assign base class depending on method selection argument
base = ccmod_class_label_lookup(method)
# Nested class with dynamically determined inheritance
class ConvCnstrMOD(base):
def __init__(self, *args, **kwargs):
super(ConvCnstrMOD, self).__init__(*args, **kwargs)
# Allow pickling of objects of type ConvCnstrMOD
_fix_dynamic_class_lookup(ConvCnstrMOD, method)
# Return object of the nested class type
return ConvCnstrMOD(*args, **kwargs) |
def evaluate_list(molecules, ensemble_lookup, options):
    """Evaluate a list of ensembles and report statistics, with optional
    ROC data and plots.

    Args:
        molecules: molecule data shared by all ensembles.
        ensemble_lookup (dict): mapping of ensemble file name to
            ensemble data; evaluated in sorted filename order so output
            is deterministic.
        options: parsed options object; ``write_roc`` and ``plot`` flags
            are consulted here, and the object is passed through to the
            metric, summary and plotting helpers.
    """
    # Per-ensemble results: {file name: metric list}
    stats = {}

    # Progress message (ROC data writing is an optional extra step)
    if options.write_roc:
        print(" Determining virtual screening performance and writing ROC data ... ")
    else:
        print(" Determining virtual screening performance ...")
    print('')

    # Compute metrics for each ensemble
    for filename in sorted(ensemble_lookup.keys()):
        stats[filename] = calculate_metrics(molecules, ensemble_lookup, filename, options)

    # write results summary
    output.write_summary(stats, options, fw_type=None)

    # plot
    if options.plot:
        print(" Making plots ... ")
        # BUG FIX: this was a bare ``print`` expression, which is a
        # no-op in Python 3 (it merely references the function).
        print('')
        plotter(molecules, ensemble_lookup, options)
constant[
Evaluate a list of ensembles and return statistics and ROC plots if appropriate
]
variable[stats] assign[=] dictionary[[], []]
if name[options].write_roc begin[:]
call[name[print], parameter[constant[ Determining virtual screening performance and writing ROC data ... ]]]
call[name[print], parameter[constant[]]]
for taget[name[filename]] in starred[call[name[sorted], parameter[call[name[ensemble_lookup].keys, parameter[]]]]] begin[:]
variable[metric_List] assign[=] call[name[calculate_metrics], parameter[name[molecules], name[ensemble_lookup], name[filename], name[options]]]
call[name[stats]][name[filename]] assign[=] name[metric_List]
call[name[output].write_summary, parameter[name[stats], name[options]]]
if name[options].plot begin[:]
call[name[print], parameter[constant[ Making plots ... ]]]
name[print]
call[name[plotter], parameter[name[molecules], name[ensemble_lookup], name[options]]] | keyword[def] identifier[evaluate_list] ( identifier[molecules] , identifier[ensemble_lookup] , identifier[options] ):
literal[string]
identifier[stats] ={}
keyword[if] identifier[options] . identifier[write_roc] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[else] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[for] identifier[filename] keyword[in] identifier[sorted] ( identifier[ensemble_lookup] . identifier[keys] ()):
identifier[metric_List] = identifier[calculate_metrics] ( identifier[molecules] , identifier[ensemble_lookup] , identifier[filename] , identifier[options] )
identifier[stats] [ identifier[filename] ]= identifier[metric_List]
identifier[output] . identifier[write_summary] ( identifier[stats] , identifier[options] , identifier[fw_type] = keyword[None] )
keyword[if] identifier[options] . identifier[plot] :
identifier[print] ( literal[string] )
identifier[print]
identifier[plotter] ( identifier[molecules] , identifier[ensemble_lookup] , identifier[options] ) | def evaluate_list(molecules, ensemble_lookup, options):
"""
Evaluate a list of ensembles and return statistics and ROC plots if appropriate
"""
# create stats dictionaries to store results from each ensemble
stats = {} # {file name : metric_List}
# print progress messages
if options.write_roc:
print(' Determining virtual screening performance and writing ROC data ... ')
print('') # depends on [control=['if'], data=[]]
else:
print(' Determining virtual screening performance ...')
print('')
for filename in sorted(ensemble_lookup.keys()):
metric_List = calculate_metrics(molecules, ensemble_lookup, filename, options)
stats[filename] = metric_List # depends on [control=['for'], data=['filename']]
# write results summary
output.write_summary(stats, options, fw_type=None)
# plot
if options.plot:
print(' Making plots ... ')
print
plotter(molecules, ensemble_lookup, options) # depends on [control=['if'], data=[]] |
def parse_config(args=sys.argv):
    """Parse command-line arguments following the config-file pattern.

    Args:
        args: argument vector, defaulting to ``sys.argv``; element 0 is
            taken to be the program name and is skipped.

    Returns:
        The populated namespace object from ``parser.parse_args()``.

    Raises:
        TBD
    """
    parser = argparse.ArgumentParser(description='Read in the config file')
    parser.add_argument('config_file',
                        metavar='FILE',
                        type=extant_file,
                        help='Configuration file.')
    # Skip argv[0] (the program name) and parse the rest.
    return parser.parse_args(args[1:])
constant[Parse the args using the config_file pattern
Args:
args: sys.argv
Returns:
The populated namespace object from parser.parse_args().
Raises:
TBD
]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[config_file]]]
return[call[name[parser].parse_args, parameter[call[name[args]][<ast.Slice object at 0x7da18f00e8c0>]]]] | keyword[def] identifier[parse_config] ( identifier[args] = identifier[sys] . identifier[argv] ):
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] (
identifier[description] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
identifier[metavar] = literal[string] , identifier[type] = identifier[extant_file] )
keyword[return] identifier[parser] . identifier[parse_args] ( identifier[args] [ literal[int] :]) | def parse_config(args=sys.argv):
"""Parse the args using the config_file pattern
Args:
args: sys.argv
Returns:
The populated namespace object from parser.parse_args().
Raises:
TBD
"""
parser = argparse.ArgumentParser(description='Read in the config file')
parser.add_argument('config_file', help='Configuration file.', metavar='FILE', type=extant_file)
return parser.parse_args(args[1:]) |
def are_dates(self):
    """
    Return |True| if the first category in this collection has a date
    label (as opposed to str or numeric). A date label is one of type
    datetime.date or datetime.datetime. Returns |False| otherwise,
    including when this category collection is empty. It also returns
    False when this category collection is hierarchical, because
    hierarchical categories can only be written as string labels.
    """
    # Hierarchical (multi-level) categories are never date-typed.
    if self.depth != 1:
        return False
    # Only the first category's label type is inspected.
    return isinstance(self[0].label, (datetime.date, datetime.datetime))
constant[
Return |True| if the first category in this collection has a date
label (as opposed to str or numeric). A date label is one of type
datetime.date or datetime.datetime. Returns |False| otherwise,
including when this category collection is empty. It also returns
False when this category collection is hierarchical, because
hierarchical categories can only be written as string labels.
]
if compare[name[self].depth not_equal[!=] constant[1]] begin[:]
return[constant[False]]
variable[first_cat_label] assign[=] call[name[self]][constant[0]].label
variable[date_types] assign[=] tuple[[<ast.Attribute object at 0x7da20c9902b0>, <ast.Attribute object at 0x7da20c991cc0>]]
if call[name[isinstance], parameter[name[first_cat_label], name[date_types]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[are_dates] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[depth] != literal[int] :
keyword[return] keyword[False]
identifier[first_cat_label] = identifier[self] [ literal[int] ]. identifier[label]
identifier[date_types] =( identifier[datetime] . identifier[date] , identifier[datetime] . identifier[datetime] )
keyword[if] identifier[isinstance] ( identifier[first_cat_label] , identifier[date_types] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def are_dates(self):
"""
Return |True| if the first category in this collection has a date
label (as opposed to str or numeric). A date label is one of type
datetime.date or datetime.datetime. Returns |False| otherwise,
including when this category collection is empty. It also returns
False when this category collection is hierarchical, because
hierarchical categories can only be written as string labels.
"""
if self.depth != 1:
return False # depends on [control=['if'], data=[]]
first_cat_label = self[0].label
date_types = (datetime.date, datetime.datetime)
if isinstance(first_cat_label, date_types):
return True # depends on [control=['if'], data=[]]
return False |
def _get_site_amplification_term(self, C, vs30):
"""
Returns the site amplification term for the case in which Vs30
is used directly
"""
return C["gamma"] * np.log10(vs30 / self.CONSTS["Vref"]) | def function[_get_site_amplification_term, parameter[self, C, vs30]]:
constant[
Returns the site amplification term for the case in which Vs30
is used directly
]
return[binary_operation[call[name[C]][constant[gamma]] * call[name[np].log10, parameter[binary_operation[name[vs30] / call[name[self].CONSTS][constant[Vref]]]]]]] | keyword[def] identifier[_get_site_amplification_term] ( identifier[self] , identifier[C] , identifier[vs30] ):
literal[string]
keyword[return] identifier[C] [ literal[string] ]* identifier[np] . identifier[log10] ( identifier[vs30] / identifier[self] . identifier[CONSTS] [ literal[string] ]) | def _get_site_amplification_term(self, C, vs30):
"""
Returns the site amplification term for the case in which Vs30
is used directly
"""
return C['gamma'] * np.log10(vs30 / self.CONSTS['Vref']) |
def tas2mach(Vtas, H):
    """True Airspeed to Mach number"""
    # Mach number is TAS divided by the local speed of sound at altitude H.
    return Vtas / vsound(H)
constant[True Airspeed to Mach number]
variable[a] assign[=] call[name[vsound], parameter[name[H]]]
variable[Mach] assign[=] binary_operation[name[Vtas] / name[a]]
return[name[Mach]] | keyword[def] identifier[tas2mach] ( identifier[Vtas] , identifier[H] ):
literal[string]
identifier[a] = identifier[vsound] ( identifier[H] )
identifier[Mach] = identifier[Vtas] / identifier[a]
keyword[return] identifier[Mach] | def tas2mach(Vtas, H):
"""True Airspeed to Mach number"""
a = vsound(H)
Mach = Vtas / a
return Mach |
def init_app(kls):
    """
    Decorator to bind middlewares/plugins that need the 'app' object
    to initialize. Bound callables are assigned on cls.init().

    :param kls: a callable (class or function) to register
    :return: kls unchanged, so this can be used as a decorator
    :raises exceptions.MochaError: if kls is not callable
    """
    # Idiom fix: callable() is the canonical check, instead of probing
    # for a '__call__' attribute with hasattr().
    if not callable(kls):
        raise exceptions.MochaError("init_app: '%s' is not callable" % kls)
    Mocha._init_apps.add(kls)
    return kls
constant[
To bind middlewares, plugins that needs the 'app' object to init
Bound middlewares will be assigned on cls.init()
]
if <ast.UnaryOp object at 0x7da18dc06770> begin[:]
<ast.Raise object at 0x7da18dc04a90>
call[name[Mocha]._init_apps.add, parameter[name[kls]]]
return[name[kls]] | keyword[def] identifier[init_app] ( identifier[kls] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[kls] , literal[string] ):
keyword[raise] identifier[exceptions] . identifier[MochaError] ( literal[string] % identifier[kls] )
identifier[Mocha] . identifier[_init_apps] . identifier[add] ( identifier[kls] )
keyword[return] identifier[kls] | def init_app(kls):
"""
To bind middlewares, plugins that needs the 'app' object to init
Bound middlewares will be assigned on cls.init()
"""
if not hasattr(kls, '__call__'):
raise exceptions.MochaError("init_app: '%s' is not callable" % kls) # depends on [control=['if'], data=[]]
Mocha._init_apps.add(kls)
return kls |
def _get_socket(self, sslversion=ssl.PROTOCOL_TLSv1):
    """Sets up an https connection and do an HTTP/raw socket request
    :param sslversion: version of ssl session
    :raises: IloConnectionError, for connection failures
    :returns: ssl wrapped socket object
    """
    err = None
    sock = None
    try:
        # Try every address returned for the host (IPv4/IPv6);
        # getaddrinfo itself may raise, which the outer except maps to
        # a resolution failure.
        for res in socket.getaddrinfo(
                self.hostname, self.port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                sock = socket.socket(af, socktype, proto)
                sock.settimeout(self.timeout)
                # Connecting to {self.hostname} at port {self.port}
                sock.connect(sa)
            except socket.timeout:
                if sock is not None:
                    sock.close()
                # Remember the failure; the loop continues to the next
                # address.
                err = exception.IloConnectionError(
                    "Timeout connecting to %(hostname)s:%(port)d"
                    % {'hostname': self.hostname, 'port': self.port})
            except socket.error:
                if sock is not None:
                    sock.close()
                # sys.exc_info()[1] instead of 'except ... as e' keeps
                # Python 2/3 source compatibility.
                e = sys.exc_info()[1]
                err = exception.IloConnectionError(
                    "Error connecting to %(hostname)s:%(port)d : %(error)s"
                    % {'hostname': self.hostname, 'port': self.port,
                       'error': str(e)})
            # NOTE(review): the loop neither breaks on success nor
            # clears `err` — if an earlier address fails and a later one
            # connects, `err` is still raised below. Confirm whether
            # first-address-wins is the intended semantics.
    except Exception:
        raise exception.IloConnectionError(
            "Unable to resolve %s" % self.hostname)
    if err is not None:
        raise err
    # wrapping the socket over ssl session
    try:
        return ssl.wrap_socket(sock, ssl_version=sslversion)
    except socket.sslerror:
        # NOTE(review): socket.sslerror exists only on Python 2; on
        # Python 3 this attribute lookup raises AttributeError and the
        # retry path is dead. Confirm the supported Python versions.
        e = sys.exc_info()[1]
        msg = (getattr(e, 'reason', None) or
               getattr(e, 'message', None))
        # Some older iLO s don't support TLSv1, retry with SSLv3
        if ('wrong version number' in msg) and (
                sslversion == ssl.PROTOCOL_TLSv1):
            return self._get_socket(ssl.PROTOCOL_SSLv3)
        raise exception.IloConnectionError(
            "Cannot establish ssl session with %(hostname)s:%(port)d : "
            "%(error)s" % {'hostname': self.hostname, 'port': self.port,
                           'error': str(e)})
constant[Sets up an https connection and do an HTTP/raw socket request
:param sslversion: version of ssl session
:raises: IloConnectionError, for connection failures
:returns: ssl wrapped socket object
]
variable[err] assign[=] constant[None]
variable[sock] assign[=] constant[None]
<ast.Try object at 0x7da20c6a9660>
if compare[name[err] is_not constant[None]] begin[:]
<ast.Raise object at 0x7da1b1a795d0>
<ast.Try object at 0x7da1b1a798d0> | keyword[def] identifier[_get_socket] ( identifier[self] , identifier[sslversion] = identifier[ssl] . identifier[PROTOCOL_TLSv1] ):
literal[string]
identifier[err] = keyword[None]
identifier[sock] = keyword[None]
keyword[try] :
keyword[for] identifier[res] keyword[in] identifier[socket] . identifier[getaddrinfo] (
identifier[self] . identifier[hostname] , identifier[self] . identifier[port] , literal[int] , identifier[socket] . identifier[SOCK_STREAM] ):
identifier[af] , identifier[socktype] , identifier[proto] , identifier[canonname] , identifier[sa] = identifier[res]
keyword[try] :
identifier[sock] = identifier[socket] . identifier[socket] ( identifier[af] , identifier[socktype] , identifier[proto] )
identifier[sock] . identifier[settimeout] ( identifier[self] . identifier[timeout] )
identifier[sock] . identifier[connect] ( identifier[sa] )
keyword[except] identifier[socket] . identifier[timeout] :
keyword[if] identifier[sock] keyword[is] keyword[not] keyword[None] :
identifier[sock] . identifier[close] ()
identifier[err] = identifier[exception] . identifier[IloConnectionError] (
literal[string]
%{ literal[string] : identifier[self] . identifier[hostname] , literal[string] : identifier[self] . identifier[port] })
keyword[except] identifier[socket] . identifier[error] :
keyword[if] identifier[sock] keyword[is] keyword[not] keyword[None] :
identifier[sock] . identifier[close] ()
identifier[e] = identifier[sys] . identifier[exc_info] ()[ literal[int] ]
identifier[err] = identifier[exception] . identifier[IloConnectionError] (
literal[string]
%{ literal[string] : identifier[self] . identifier[hostname] , literal[string] : identifier[self] . identifier[port] ,
literal[string] : identifier[str] ( identifier[e] )})
keyword[except] identifier[Exception] :
keyword[raise] identifier[exception] . identifier[IloConnectionError] (
literal[string] % identifier[self] . identifier[hostname] )
keyword[if] identifier[err] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[err]
keyword[try] :
keyword[return] identifier[ssl] . identifier[wrap_socket] ( identifier[sock] , identifier[ssl_version] = identifier[sslversion] )
keyword[except] identifier[socket] . identifier[sslerror] :
identifier[e] = identifier[sys] . identifier[exc_info] ()[ literal[int] ]
identifier[msg] =( identifier[getattr] ( identifier[e] , literal[string] , keyword[None] ) keyword[or]
identifier[getattr] ( identifier[e] , literal[string] , keyword[None] ))
keyword[if] ( literal[string] keyword[in] identifier[msg] ) keyword[and] (
identifier[sslversion] == identifier[ssl] . identifier[PROTOCOL_TLSv1] ):
keyword[return] identifier[self] . identifier[_get_socket] ( identifier[ssl] . identifier[PROTOCOL_SSLv3] )
keyword[raise] identifier[exception] . identifier[IloConnectionError] (
literal[string]
literal[string] %{ literal[string] : identifier[self] . identifier[hostname] , literal[string] : identifier[self] . identifier[port] ,
literal[string] : identifier[str] ( identifier[e] )}) | def _get_socket(self, sslversion=ssl.PROTOCOL_TLSv1):
"""Sets up an https connection and do an HTTP/raw socket request
:param sslversion: version of ssl session
:raises: IloConnectionError, for connection failures
:returns: ssl wrapped socket object
"""
err = None
sock = None
try:
for res in socket.getaddrinfo(self.hostname, self.port, 0, socket.SOCK_STREAM):
(af, socktype, proto, canonname, sa) = res
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(self.timeout)
# Connecting to {self.hostname} at port {self.port}
sock.connect(sa) # depends on [control=['try'], data=[]]
except socket.timeout:
if sock is not None:
sock.close() # depends on [control=['if'], data=['sock']]
err = exception.IloConnectionError('Timeout connecting to %(hostname)s:%(port)d' % {'hostname': self.hostname, 'port': self.port}) # depends on [control=['except'], data=[]]
except socket.error:
if sock is not None:
sock.close() # depends on [control=['if'], data=['sock']]
e = sys.exc_info()[1]
err = exception.IloConnectionError('Error connecting to %(hostname)s:%(port)d : %(error)s' % {'hostname': self.hostname, 'port': self.port, 'error': str(e)}) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['res']] # depends on [control=['try'], data=[]]
except Exception:
raise exception.IloConnectionError('Unable to resolve %s' % self.hostname) # depends on [control=['except'], data=[]]
if err is not None:
raise err # depends on [control=['if'], data=['err']]
# wrapping the socket over ssl session
try:
return ssl.wrap_socket(sock, ssl_version=sslversion) # depends on [control=['try'], data=[]]
except socket.sslerror:
e = sys.exc_info()[1]
msg = getattr(e, 'reason', None) or getattr(e, 'message', None)
# Some older iLO s don't support TLSv1, retry with SSLv3
if 'wrong version number' in msg and sslversion == ssl.PROTOCOL_TLSv1:
return self._get_socket(ssl.PROTOCOL_SSLv3) # depends on [control=['if'], data=[]]
raise exception.IloConnectionError('Cannot establish ssl session with %(hostname)s:%(port)d : %(error)s' % {'hostname': self.hostname, 'port': self.port, 'error': str(e)}) # depends on [control=['except'], data=[]] |
def in_degree_iter(self, nbunch=None, t=None):
    """Return an iterator for (node, in_degree) at time t.
    The node degree is the number of edges incoming to the node in a given timeframe.
    Parameters
    ----------
    nbunch : iterable container, optional (default=all nodes)
        A container of nodes. The container will be iterated
        through once.
    t : snapshot id (default=None)
        If None will be returned an iterator over the degree of nodes on the flattened graph.
    Returns
    -------
    nd_iter : an iterator
        The iterator returns two-tuples of (node, degree).
    See Also
    --------
    degree
    Examples
    --------
    >>> G = dn.DynDiGraph()
    >>> G.add_interaction(0, 1, t=0)
    >>> list(G.in_degree_iter(0, t=0))
    [(0, 0)]
    >>> list(G.in_degree_iter([0,1], t=0))
    [(0, 0), (1, 1)]
    """
    # Select the (node, predecessor-dict) pairs to report on.
    if nbunch is None:
        pairs = self._pred.items()
    else:
        pairs = ((node, self._pred[node]) for node in self.nbunch_iter(nbunch))

    if t is None:
        # Flattened graph: the in-degree is the total predecessor count.
        for node, _ in pairs:
            yield (node, len(self._pred[node]))
    else:
        # Count only the predecessors whose interaction is present at
        # snapshot t (zero when none are).
        for node, preds in pairs:
            active = sum(1 for v in preds if self.__presence_test(v, node, t))
            yield (node, active)
constant[Return an iterator for (node, in_degree) at time t.
The node degree is the number of edges incoming to the node in a given timeframe.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None will be returned an iterator over the degree of nodes on the flattened graph.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0, 1, t=0)
>>> list(G.in_degree_iter(0, t=0))
[(0, 0)]
>>> list(G.in_degree_iter([0,1], t=0))
[(0, 0), (1, 1)]
]
if compare[name[nbunch] is constant[None]] begin[:]
variable[nodes_nbrs] assign[=] call[name[self]._pred.items, parameter[]]
if compare[name[t] is constant[None]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b05d8490>, <ast.Name object at 0x7da1b05d8a00>]]] in starred[name[nodes_nbrs]] begin[:]
variable[deg] assign[=] call[name[len], parameter[call[name[self]._pred][name[n]]]]
<ast.Yield object at 0x7da1b05d8670> | keyword[def] identifier[in_degree_iter] ( identifier[self] , identifier[nbunch] = keyword[None] , identifier[t] = keyword[None] ):
literal[string]
keyword[if] identifier[nbunch] keyword[is] keyword[None] :
identifier[nodes_nbrs] = identifier[self] . identifier[_pred] . identifier[items] ()
keyword[else] :
identifier[nodes_nbrs] =(( identifier[n] , identifier[self] . identifier[_pred] [ identifier[n] ]) keyword[for] identifier[n] keyword[in] identifier[self] . identifier[nbunch_iter] ( identifier[nbunch] ))
keyword[if] identifier[t] keyword[is] keyword[None] :
keyword[for] identifier[n] , identifier[nbrs] keyword[in] identifier[nodes_nbrs] :
identifier[deg] = identifier[len] ( identifier[self] . identifier[_pred] [ identifier[n] ])
keyword[yield] ( identifier[n] , identifier[deg] )
keyword[else] :
keyword[for] identifier[n] , identifier[nbrs] keyword[in] identifier[nodes_nbrs] :
identifier[edges_t] = identifier[len] ([ identifier[v] keyword[for] identifier[v] keyword[in] identifier[nbrs] . identifier[keys] () keyword[if] identifier[self] . identifier[__presence_test] ( identifier[v] , identifier[n] , identifier[t] )])
keyword[if] identifier[edges_t] > literal[int] :
keyword[yield] ( identifier[n] , identifier[edges_t] )
keyword[else] :
keyword[yield] ( identifier[n] , literal[int] ) | def in_degree_iter(self, nbunch=None, t=None):
"""Return an iterator for (node, in_degree) at time t.
The node degree is the number of edges incoming to the node in a given timeframe.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None will be returned an iterator over the degree of nodes on the flattened graph.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0, 1, t=0)
>>> list(G.in_degree_iter(0, t=0))
[(0, 0)]
>>> list(G.in_degree_iter([0,1], t=0))
[(0, 0), (1, 1)]
"""
if nbunch is None:
nodes_nbrs = self._pred.items() # depends on [control=['if'], data=[]]
else:
nodes_nbrs = ((n, self._pred[n]) for n in self.nbunch_iter(nbunch))
if t is None:
for (n, nbrs) in nodes_nbrs:
deg = len(self._pred[n])
yield (n, deg) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
for (n, nbrs) in nodes_nbrs:
edges_t = len([v for v in nbrs.keys() if self.__presence_test(v, n, t)])
if edges_t > 0:
yield (n, edges_t) # depends on [control=['if'], data=['edges_t']]
else:
yield (n, 0) # depends on [control=['for'], data=[]] |
def upload_gif(gif):
    """Uploads an image file to Imgur"""
    # Both credentials must be present in the environment; bail out with
    # a message otherwise.
    client_id = os.environ.get('IMGUR_API_ID')
    client_secret = os.environ.get('IMGUR_API_SECRET')
    if client_id is None or client_secret is None:
        click.echo('Cannot upload - could not find IMGUR_API_ID or IMGUR_API_SECRET environment variables')
        return
    imgur = ImgurClient(client_id, client_secret)
    click.echo('Uploading file {}'.format(click.format_filename(gif)))
    upload = imgur.upload_from_path(gif)
    click.echo('File uploaded - see your gif at {}'.format(upload['link']))
literal[string]
identifier[client_id] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
identifier[client_secret] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
keyword[if] identifier[client_id] keyword[is] keyword[None] keyword[or] identifier[client_secret] keyword[is] keyword[None] :
identifier[click] . identifier[echo] ( literal[string] )
keyword[return]
identifier[client] = identifier[ImgurClient] ( identifier[client_id] , identifier[client_secret] )
identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[click] . identifier[format_filename] ( identifier[gif] )))
identifier[response] = identifier[client] . identifier[upload_from_path] ( identifier[gif] )
identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[response] [ literal[string] ])) | def upload_gif(gif):
"""Uploads an image file to Imgur"""
client_id = os.environ.get('IMGUR_API_ID')
client_secret = os.environ.get('IMGUR_API_SECRET')
if client_id is None or client_secret is None:
click.echo('Cannot upload - could not find IMGUR_API_ID or IMGUR_API_SECRET environment variables')
return # depends on [control=['if'], data=[]]
client = ImgurClient(client_id, client_secret)
click.echo('Uploading file {}'.format(click.format_filename(gif)))
response = client.upload_from_path(gif)
click.echo('File uploaded - see your gif at {}'.format(response['link'])) |
def parse(lines, root=None):
    """
    Parse ``ls`` output lines into per-directory dictionaries.

    Args:
        lines (list): A list of lines generated by ls.
        root (str): The directory name to be used for ls output stanzas
            that don't have a name.

    Returns:
        dict: Maps each directory path to the ``Directory`` built from
        its ls stanza.
    """
    doc = {}
    pending = []     # entry lines collected for the stanza being read
    current = None   # directory name of the stanza being read
    count = None     # value parsed from the stanza's "total N" line
    for raw in lines:
        text = raw.strip()
        if not text:
            continue
        # A line like "/some/dir:" opens a new stanza.
        if text.startswith("/") and text.endswith(":"):
            if current is None:
                current = text[:-1]
                # Entries seen before any header are keyed under ``root``.
                if pending:
                    doc[root] = Directory(current, count or len(pending), pending)
                    count = None
                    pending = []
            else:
                # Close out the previous stanza before starting this one.
                doc[current or root] = Directory(current, count or len(pending), pending)
                count = None
                pending = []
                current = text[:-1]
            continue
        if text.startswith("total"):
            count = int(text.split(None, 1)[1])
            continue
        pending.append(text)
    # Flush the final stanza, falling back to ``root`` when it had no header.
    name = current or root
    doc[name] = Directory(name, count or len(pending), pending)
return doc | def function[parse, parameter[lines, root]]:
constant[
Parses a list of lines from ls into dictionaries representing their
components.
Args:
lines (list): A list of lines generated by ls.
root (str): The directory name to be used for ls output stanzas that
don't have a name.
Returns:
A dictionary representing the ls output. It's keyed by the path
containing each ls stanza.
]
variable[doc] assign[=] dictionary[[], []]
variable[entries] assign[=] list[[]]
variable[name] assign[=] constant[None]
variable[total] assign[=] constant[None]
for taget[name[line]] in starred[name[lines]] begin[:]
variable[line] assign[=] call[name[line].strip, parameter[]]
if <ast.UnaryOp object at 0x7da18dc997e0> begin[:]
continue
if <ast.BoolOp object at 0x7da18dc980d0> begin[:]
if compare[name[name] is constant[None]] begin[:]
variable[name] assign[=] call[name[line]][<ast.Slice object at 0x7da18dc99db0>]
if name[entries] begin[:]
variable[d] assign[=] call[name[Directory], parameter[name[name], <ast.BoolOp object at 0x7da18dc9a560>, name[entries]]]
call[name[doc]][name[root]] assign[=] name[d]
variable[total] assign[=] constant[None]
variable[entries] assign[=] list[[]]
continue
if call[name[line].startswith, parameter[constant[total]]] begin[:]
variable[total] assign[=] call[name[int], parameter[call[call[name[line].split, parameter[constant[None], constant[1]]]][constant[1]]]]
continue
call[name[entries].append, parameter[name[line]]]
variable[name] assign[=] <ast.BoolOp object at 0x7da20c76c0a0>
call[name[doc]][name[name]] assign[=] call[name[Directory], parameter[name[name], <ast.BoolOp object at 0x7da20c76dd20>, name[entries]]]
return[name[doc]] | keyword[def] identifier[parse] ( identifier[lines] , identifier[root] = keyword[None] ):
literal[string]
identifier[doc] ={}
identifier[entries] =[]
identifier[name] = keyword[None]
identifier[total] = keyword[None]
keyword[for] identifier[line] keyword[in] identifier[lines] :
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] keyword[not] identifier[line] :
keyword[continue]
keyword[if] identifier[line] keyword[and] identifier[line] [ literal[int] ]== literal[string] keyword[and] identifier[line] [- literal[int] ]== literal[string] :
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[name] = identifier[line] [:- literal[int] ]
keyword[if] identifier[entries] :
identifier[d] = identifier[Directory] ( identifier[name] , identifier[total] keyword[or] identifier[len] ( identifier[entries] ), identifier[entries] )
identifier[doc] [ identifier[root] ]= identifier[d]
identifier[total] = keyword[None]
identifier[entries] =[]
keyword[else] :
identifier[d] = identifier[Directory] ( identifier[name] , identifier[total] keyword[or] identifier[len] ( identifier[entries] ), identifier[entries] )
identifier[doc] [ identifier[name] keyword[or] identifier[root] ]= identifier[d]
identifier[total] = keyword[None]
identifier[entries] =[]
identifier[name] = identifier[line] [:- literal[int] ]
keyword[continue]
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[total] = identifier[int] ( identifier[line] . identifier[split] ( keyword[None] , literal[int] )[ literal[int] ])
keyword[continue]
identifier[entries] . identifier[append] ( identifier[line] )
identifier[name] = identifier[name] keyword[or] identifier[root]
identifier[doc] [ identifier[name] ]= identifier[Directory] ( identifier[name] , identifier[total] keyword[or] identifier[len] ( identifier[entries] ), identifier[entries] )
keyword[return] identifier[doc] | def parse(lines, root=None):
"""
Parses a list of lines from ls into dictionaries representing their
components.
Args:
lines (list): A list of lines generated by ls.
root (str): The directory name to be used for ls output stanzas that
don't have a name.
Returns:
A dictionary representing the ls output. It's keyed by the path
containing each ls stanza.
"""
doc = {}
entries = []
name = None
total = None
for line in lines:
line = line.strip()
if not line:
continue # depends on [control=['if'], data=[]]
if line and line[0] == '/' and (line[-1] == ':'):
if name is None:
name = line[:-1]
if entries:
d = Directory(name, total or len(entries), entries)
doc[root] = d
total = None
entries = [] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['name']]
else:
d = Directory(name, total or len(entries), entries)
doc[name or root] = d
total = None
entries = []
name = line[:-1]
continue # depends on [control=['if'], data=[]]
if line.startswith('total'):
total = int(line.split(None, 1)[1])
continue # depends on [control=['if'], data=[]]
entries.append(line) # depends on [control=['for'], data=['line']]
name = name or root
doc[name] = Directory(name, total or len(entries), entries)
return doc |
def _bgzip_from_bam(bam_file, dirs, data, is_retry=False, output_infix=''):
    """Create bgzipped fastq files from an input BAM file.

    Converts ``bam_file`` into one (single-end) or two (paired-end)
    bgzip-compressed fastq files under ``<work>/align_prep`` by piping
    bamtofastq output into bgzip. A transient bamtofastq "deflate failed"
    IO error triggers one retry with a single core.

    Args:
        bam_file: Path to the input BAM (may be an objectstore location).
        dirs: Run directories dictionary; ``dirs["work"]`` is used.
        data: Sample dictionary containing ``config``.
        is_retry: True on the retry pass; forces regeneration and a
            single-core bgzip.
        output_infix: Optional string inserted into output file names
            before the read-number suffix.

    Returns:
        List of the fastq paths that exist after conversion (one or two).
    """
    # tools
    config = data["config"]
    bamtofastq = config_utils.get_program("bamtofastq", config)
    resources = config_utils.get_resources("bamtofastq", config)
    cores = config["algorithm"].get("num_cores", 1)
    max_mem = config_utils.convert_to_bytes(resources.get("memory", "1G")) * cores
    bgzip = tools.get_bgzip_cmd(config, is_retry)
    # files
    work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep"))
    out_file_1 = os.path.join(work_dir, "%s%s-1.fq.gz" % (os.path.splitext(os.path.basename(bam_file))[0], output_infix))
    out_file_2 = out_file_1.replace("-1.fq.gz", "-2.fq.gz")
    needs_retry = False
    if is_retry or not utils.file_exists(out_file_1):
        if not bam.is_paired(bam_file):
            out_file_2 = None
        with file_transaction(config, out_file_1) as tx_out_file:
            # Remove any partial outputs from an earlier failed attempt.
            for f in [tx_out_file, out_file_1, out_file_2]:
                if f and os.path.exists(f):
                    os.remove(f)
            fq1_bgzip_cmd = "%s -c /dev/stdin > %s" % (bgzip, tx_out_file)
            prep_cmd = _seqtk_fastq_prep_cl(data, read_num=0)
            if prep_cmd:
                fq1_bgzip_cmd = prep_cmd + " | " + fq1_bgzip_cmd
            sortprefix = "%s-sort" % os.path.splitext(tx_out_file)[0]
            if bam.is_paired(bam_file):
                prep_cmd = _seqtk_fastq_prep_cl(data, read_num=1)
                fq2_bgzip_cmd = "%s -c /dev/stdin > %s" % (bgzip, out_file_2)
                if prep_cmd:
                    fq2_bgzip_cmd = prep_cmd + " | " + fq2_bgzip_cmd
                out_str = ("F=>({fq1_bgzip_cmd}) F2=>({fq2_bgzip_cmd}) S=/dev/null O=/dev/null "
                           "O2=/dev/null collate=1 colsbs={max_mem}")
            else:
                out_str = "S=>({fq1_bgzip_cmd})"
            # NOTE(review): this rebinds bam_file to the objectstore command-line
            # form, so a retry re-runs cl_input on the transformed value —
            # presumably idempotent for local paths; confirm for remote inputs.
            bam_file = objectstore.cl_input(bam_file)
            extra_opts = " ".join([str(x) for x in resources.get("options", [])])
            cmd = "{bamtofastq} filename={bam_file} T={sortprefix} {extra_opts} " + out_str
            try:
                do.run(cmd.format(**locals()), "BAM to bgzipped fastq",
                       checks=[do.file_reasonable_size(tx_out_file, bam_file)],
                       log_error=False)
            except subprocess.CalledProcessError as msg:
                if not is_retry and "deflate failed" in str(msg):
                    logger.info("bamtofastq deflate IO failure preparing %s. Retrying with single core."
                                % (bam_file))
                    needs_retry = True
                else:
                    # logger.exception() requires a msg argument; calling it
                    # bare raised TypeError and masked the real failure.
                    logger.exception("bamtofastq failed converting %s" % bam_file)
                    raise
    if needs_retry:
        # Propagate output_infix so the retry writes the same output files
        # instead of silently falling back to the default names.
        return _bgzip_from_bam(bam_file, dirs, data, is_retry=True,
                               output_infix=output_infix)
    else:
        return [x for x in [out_file_1, out_file_2] if x is not None and utils.file_exists(x)]
constant[Create bgzipped fastq files from an input BAM file.
]
variable[config] assign[=] call[name[data]][constant[config]]
variable[bamtofastq] assign[=] call[name[config_utils].get_program, parameter[constant[bamtofastq], name[config]]]
variable[resources] assign[=] call[name[config_utils].get_resources, parameter[constant[bamtofastq], name[config]]]
variable[cores] assign[=] call[call[name[config]][constant[algorithm]].get, parameter[constant[num_cores], constant[1]]]
variable[max_mem] assign[=] binary_operation[call[name[config_utils].convert_to_bytes, parameter[call[name[resources].get, parameter[constant[memory], constant[1G]]]]] * name[cores]]
variable[bgzip] assign[=] call[name[tools].get_bgzip_cmd, parameter[name[config], name[is_retry]]]
variable[work_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[call[name[dirs]][constant[work]], constant[align_prep]]]]]
variable[out_file_1] assign[=] call[name[os].path.join, parameter[name[work_dir], binary_operation[constant[%s%s-1.fq.gz] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da18bccaec0>, <ast.Name object at 0x7da18bcc9690>]]]]]
variable[out_file_2] assign[=] call[name[out_file_1].replace, parameter[constant[-1.fq.gz], constant[-2.fq.gz]]]
variable[needs_retry] assign[=] constant[False]
if <ast.BoolOp object at 0x7da18bccb7c0> begin[:]
if <ast.UnaryOp object at 0x7da18bcc9060> begin[:]
variable[out_file_2] assign[=] constant[None]
with call[name[file_transaction], parameter[name[config], name[out_file_1]]] begin[:]
for taget[name[f]] in starred[list[[<ast.Name object at 0x7da18bccb5e0>, <ast.Name object at 0x7da18bccae60>, <ast.Name object at 0x7da18f09fb20>]]] begin[:]
if <ast.BoolOp object at 0x7da18f09f7f0> begin[:]
call[name[os].remove, parameter[name[f]]]
variable[fq1_bgzip_cmd] assign[=] binary_operation[constant[%s -c /dev/stdin > %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f09ffa0>, <ast.Name object at 0x7da18f09c550>]]]
variable[prep_cmd] assign[=] call[name[_seqtk_fastq_prep_cl], parameter[name[data]]]
if name[prep_cmd] begin[:]
variable[fq1_bgzip_cmd] assign[=] binary_operation[binary_operation[name[prep_cmd] + constant[ | ]] + name[fq1_bgzip_cmd]]
variable[sortprefix] assign[=] binary_operation[constant[%s-sort] <ast.Mod object at 0x7da2590d6920> call[call[name[os].path.splitext, parameter[name[tx_out_file]]]][constant[0]]]
if call[name[bam].is_paired, parameter[name[bam_file]]] begin[:]
variable[prep_cmd] assign[=] call[name[_seqtk_fastq_prep_cl], parameter[name[data]]]
variable[fq2_bgzip_cmd] assign[=] binary_operation[constant[%s -c /dev/stdin > %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1831450>, <ast.Name object at 0x7da1b1833d30>]]]
if name[prep_cmd] begin[:]
variable[fq2_bgzip_cmd] assign[=] binary_operation[binary_operation[name[prep_cmd] + constant[ | ]] + name[fq2_bgzip_cmd]]
variable[out_str] assign[=] constant[F=>({fq1_bgzip_cmd}) F2=>({fq2_bgzip_cmd}) S=/dev/null O=/dev/null O2=/dev/null collate=1 colsbs={max_mem}]
variable[bam_file] assign[=] call[name[objectstore].cl_input, parameter[name[bam_file]]]
variable[extra_opts] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da1b18305e0>]]
variable[cmd] assign[=] binary_operation[constant[{bamtofastq} filename={bam_file} T={sortprefix} {extra_opts} ] + name[out_str]]
<ast.Try object at 0x7da1b1832920>
if name[needs_retry] begin[:]
return[call[name[_bgzip_from_bam], parameter[name[bam_file], name[dirs], name[data]]]] | keyword[def] identifier[_bgzip_from_bam] ( identifier[bam_file] , identifier[dirs] , identifier[data] , identifier[is_retry] = keyword[False] , identifier[output_infix] = literal[string] ):
literal[string]
identifier[config] = identifier[data] [ literal[string] ]
identifier[bamtofastq] = identifier[config_utils] . identifier[get_program] ( literal[string] , identifier[config] )
identifier[resources] = identifier[config_utils] . identifier[get_resources] ( literal[string] , identifier[config] )
identifier[cores] = identifier[config] [ literal[string] ]. identifier[get] ( literal[string] , literal[int] )
identifier[max_mem] = identifier[config_utils] . identifier[convert_to_bytes] ( identifier[resources] . identifier[get] ( literal[string] , literal[string] ))* identifier[cores]
identifier[bgzip] = identifier[tools] . identifier[get_bgzip_cmd] ( identifier[config] , identifier[is_retry] )
identifier[work_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dirs] [ literal[string] ], literal[string] ))
identifier[out_file_1] = identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] %( identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[bam_file] ))[ literal[int] ], identifier[output_infix] ))
identifier[out_file_2] = identifier[out_file_1] . identifier[replace] ( literal[string] , literal[string] )
identifier[needs_retry] = keyword[False]
keyword[if] identifier[is_retry] keyword[or] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[out_file_1] ):
keyword[if] keyword[not] identifier[bam] . identifier[is_paired] ( identifier[bam_file] ):
identifier[out_file_2] = keyword[None]
keyword[with] identifier[file_transaction] ( identifier[config] , identifier[out_file_1] ) keyword[as] identifier[tx_out_file] :
keyword[for] identifier[f] keyword[in] [ identifier[tx_out_file] , identifier[out_file_1] , identifier[out_file_2] ]:
keyword[if] identifier[f] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[f] ):
identifier[os] . identifier[remove] ( identifier[f] )
identifier[fq1_bgzip_cmd] = literal[string] %( identifier[bgzip] , identifier[tx_out_file] )
identifier[prep_cmd] = identifier[_seqtk_fastq_prep_cl] ( identifier[data] , identifier[read_num] = literal[int] )
keyword[if] identifier[prep_cmd] :
identifier[fq1_bgzip_cmd] = identifier[prep_cmd] + literal[string] + identifier[fq1_bgzip_cmd]
identifier[sortprefix] = literal[string] % identifier[os] . identifier[path] . identifier[splitext] ( identifier[tx_out_file] )[ literal[int] ]
keyword[if] identifier[bam] . identifier[is_paired] ( identifier[bam_file] ):
identifier[prep_cmd] = identifier[_seqtk_fastq_prep_cl] ( identifier[data] , identifier[read_num] = literal[int] )
identifier[fq2_bgzip_cmd] = literal[string] %( identifier[bgzip] , identifier[out_file_2] )
keyword[if] identifier[prep_cmd] :
identifier[fq2_bgzip_cmd] = identifier[prep_cmd] + literal[string] + identifier[fq2_bgzip_cmd]
identifier[out_str] =( literal[string]
literal[string] )
keyword[else] :
identifier[out_str] = literal[string]
identifier[bam_file] = identifier[objectstore] . identifier[cl_input] ( identifier[bam_file] )
identifier[extra_opts] = literal[string] . identifier[join] ([ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[resources] . identifier[get] ( literal[string] ,[])])
identifier[cmd] = literal[string] + identifier[out_str]
keyword[try] :
identifier[do] . identifier[run] ( identifier[cmd] . identifier[format] (** identifier[locals] ()), literal[string] ,
identifier[checks] =[ identifier[do] . identifier[file_reasonable_size] ( identifier[tx_out_file] , identifier[bam_file] )],
identifier[log_error] = keyword[False] )
keyword[except] identifier[subprocess] . identifier[CalledProcessError] keyword[as] identifier[msg] :
keyword[if] keyword[not] identifier[is_retry] keyword[and] literal[string] keyword[in] identifier[str] ( identifier[msg] ):
identifier[logger] . identifier[info] ( literal[string]
%( identifier[bam_file] ))
identifier[needs_retry] = keyword[True]
keyword[else] :
identifier[logger] . identifier[exception] ()
keyword[raise]
keyword[if] identifier[needs_retry] :
keyword[return] identifier[_bgzip_from_bam] ( identifier[bam_file] , identifier[dirs] , identifier[data] , identifier[is_retry] = keyword[True] )
keyword[else] :
keyword[return] [ identifier[x] keyword[for] identifier[x] keyword[in] [ identifier[out_file_1] , identifier[out_file_2] ] keyword[if] identifier[x] keyword[is] keyword[not] keyword[None] keyword[and] identifier[utils] . identifier[file_exists] ( identifier[x] )] | def _bgzip_from_bam(bam_file, dirs, data, is_retry=False, output_infix=''):
"""Create bgzipped fastq files from an input BAM file.
"""
# tools
config = data['config']
bamtofastq = config_utils.get_program('bamtofastq', config)
resources = config_utils.get_resources('bamtofastq', config)
cores = config['algorithm'].get('num_cores', 1)
max_mem = config_utils.convert_to_bytes(resources.get('memory', '1G')) * cores
bgzip = tools.get_bgzip_cmd(config, is_retry)
# files
work_dir = utils.safe_makedir(os.path.join(dirs['work'], 'align_prep'))
out_file_1 = os.path.join(work_dir, '%s%s-1.fq.gz' % (os.path.splitext(os.path.basename(bam_file))[0], output_infix))
out_file_2 = out_file_1.replace('-1.fq.gz', '-2.fq.gz')
needs_retry = False
if is_retry or not utils.file_exists(out_file_1):
if not bam.is_paired(bam_file):
out_file_2 = None # depends on [control=['if'], data=[]]
with file_transaction(config, out_file_1) as tx_out_file:
for f in [tx_out_file, out_file_1, out_file_2]:
if f and os.path.exists(f):
os.remove(f) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
fq1_bgzip_cmd = '%s -c /dev/stdin > %s' % (bgzip, tx_out_file)
prep_cmd = _seqtk_fastq_prep_cl(data, read_num=0)
if prep_cmd:
fq1_bgzip_cmd = prep_cmd + ' | ' + fq1_bgzip_cmd # depends on [control=['if'], data=[]]
sortprefix = '%s-sort' % os.path.splitext(tx_out_file)[0]
if bam.is_paired(bam_file):
prep_cmd = _seqtk_fastq_prep_cl(data, read_num=1)
fq2_bgzip_cmd = '%s -c /dev/stdin > %s' % (bgzip, out_file_2)
if prep_cmd:
fq2_bgzip_cmd = prep_cmd + ' | ' + fq2_bgzip_cmd # depends on [control=['if'], data=[]]
out_str = 'F=>({fq1_bgzip_cmd}) F2=>({fq2_bgzip_cmd}) S=/dev/null O=/dev/null O2=/dev/null collate=1 colsbs={max_mem}' # depends on [control=['if'], data=[]]
else:
out_str = 'S=>({fq1_bgzip_cmd})'
bam_file = objectstore.cl_input(bam_file)
extra_opts = ' '.join([str(x) for x in resources.get('options', [])])
cmd = '{bamtofastq} filename={bam_file} T={sortprefix} {extra_opts} ' + out_str
try:
do.run(cmd.format(**locals()), 'BAM to bgzipped fastq', checks=[do.file_reasonable_size(tx_out_file, bam_file)], log_error=False) # depends on [control=['try'], data=[]]
except subprocess.CalledProcessError as msg:
if not is_retry and 'deflate failed' in str(msg):
logger.info('bamtofastq deflate IO failure preparing %s. Retrying with single core.' % bam_file)
needs_retry = True # depends on [control=['if'], data=[]]
else:
logger.exception()
raise # depends on [control=['except'], data=['msg']] # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]]
if needs_retry:
return _bgzip_from_bam(bam_file, dirs, data, is_retry=True) # depends on [control=['if'], data=[]]
else:
return [x for x in [out_file_1, out_file_2] if x is not None and utils.file_exists(x)] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.