code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def get_json_history(self, item=None, nb=0):
"""Return the history (JSON format).
- the stats history (dict of list) if item is None
- the stats history for the given item (list) instead
- None if item did not exist in the history
Limit to lasts nb items (all if nb=0)
"""
# Fetch at most `nb` samples from the underlying history store (0 == all).
s = self.stats_history.get_json(nb=nb)
if item is None:
# No specific item requested: return the full history dict.
return s
else:
if item in s:
return s[item]
else:
# Unknown item: return None rather than raise, per the docstring contract.
return None | def function[get_json_history, parameter[self, item, nb]]:
constant[Return the history (JSON format).
- the stats history (dict of list) if item is None
- the stats history for the given item (list) instead
- None if item did not exist in the history
Limit to lasts nb items (all if nb=0)
]
variable[s] assign[=] call[name[self].stats_history.get_json, parameter[]]
if compare[name[item] is constant[None]] begin[:]
return[name[s]] | keyword[def] identifier[get_json_history] ( identifier[self] , identifier[item] = keyword[None] , identifier[nb] = literal[int] ):
literal[string]
identifier[s] = identifier[self] . identifier[stats_history] . identifier[get_json] ( identifier[nb] = identifier[nb] )
keyword[if] identifier[item] keyword[is] keyword[None] :
keyword[return] identifier[s]
keyword[else] :
keyword[if] identifier[item] keyword[in] identifier[s] :
keyword[return] identifier[s] [ identifier[item] ]
keyword[else] :
keyword[return] keyword[None] | def get_json_history(self, item=None, nb=0):
"""Return the history (JSON format).
- the stats history (dict of list) if item is None
- the stats history for the given item (list) instead
- None if item did not exist in the history
Limit to lasts nb items (all if nb=0)
"""
s = self.stats_history.get_json(nb=nb)
if item is None:
return s # depends on [control=['if'], data=[]]
elif item in s:
return s[item] # depends on [control=['if'], data=['item', 's']]
else:
return None |
def push_notification_devices_destroy_many(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/push_notification_devices#bulk-unregister-push-notification-devices"
# Bulk-unregister push notification devices; `data` is the request payload
# and extra kwargs are forwarded to the generic HTTP call helper.
api_path = "/api/v2/push_notification_devices/destroy_many.json"
return self.call(api_path, method="POST", data=data, **kwargs) | def function[push_notification_devices_destroy_many, parameter[self, data]]:
constant[https://developer.zendesk.com/rest_api/docs/core/push_notification_devices#bulk-unregister-push-notification-devices]
variable[api_path] assign[=] constant[/api/v2/push_notification_devices/destroy_many.json]
return[call[name[self].call, parameter[name[api_path]]]] | keyword[def] identifier[push_notification_devices_destroy_many] ( identifier[self] , identifier[data] ,** identifier[kwargs] ):
literal[string]
identifier[api_path] = literal[string]
keyword[return] identifier[self] . identifier[call] ( identifier[api_path] , identifier[method] = literal[string] , identifier[data] = identifier[data] ,** identifier[kwargs] ) | def push_notification_devices_destroy_many(self, data, **kwargs):
"""https://developer.zendesk.com/rest_api/docs/core/push_notification_devices#bulk-unregister-push-notification-devices"""
api_path = '/api/v2/push_notification_devices/destroy_many.json'
return self.call(api_path, method='POST', data=data, **kwargs) |
async def status(self, *args, **kwargs):
"""
Get task status
Get task status structure from `taskId`
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
"""
# Delegate to the shared API-call machinery using the cached endpoint
# metadata registered under the "status" key.
return await self._makeApiCall(self.funcinfo["status"], *args, **kwargs) | <ast.AsyncFunctionDef object at 0x7da20c991fc0>
literal[string]
keyword[return] keyword[await] identifier[self] . identifier[_makeApiCall] ( identifier[self] . identifier[funcinfo] [ literal[string] ],* identifier[args] ,** identifier[kwargs] ) | async def status(self, *args, **kwargs):
"""
Get task status
Get task status structure from `taskId`
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo['status'], *args, **kwargs) |
def add_attribute(self, name, value):
"""
:param name: Name of the attribute
:type name: string
:param value: Value of the attribute
:type value: string
"""
# Wrap the name/value pair in an Attribute object and append it,
# preserving insertion order.
self.attributes.append(Attribute(name, value)) | def function[add_attribute, parameter[self, name, value]]:
constant[
:param name: Name of the attribute
:type name: string
:param value: Value of the attribute
:type value: string
]
call[name[self].attributes.append, parameter[call[name[Attribute], parameter[name[name], name[value]]]]] | keyword[def] identifier[add_attribute] ( identifier[self] , identifier[name] , identifier[value] ):
literal[string]
identifier[self] . identifier[attributes] . identifier[append] ( identifier[Attribute] ( identifier[name] , identifier[value] )) | def add_attribute(self, name, value):
"""
:param name: Name of the attribute
:type name: string
:param value: Value of the attribute
:type value: string
"""
self.attributes.append(Attribute(name, value)) |
def plot_hpd(
x,
y,
credible_interval=0.94,
color="C1",
circular=False,
smooth=True,
smooth_kwargs=None,
fill_kwargs=None,
plot_kwargs=None,
ax=None,
):
"""
Plot hpd intervals for regression data.
Parameters
----------
x : array-like
Values to plot
y : array-like
values from which to compute the hpd
credible_interval : float, optional
Credible interval to plot. Defaults to 0.94.
color : str
Color used for the limits of the HPD interval and fill. Should be a valid matplotlib color
circular : bool, optional
Whether to compute the hpd taking into account `x` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
smooth : boolean
If True the result will be smoothed by first computing a linear interpolation of the data
over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.
Defaults to True.
smooth_kwargs : dict, optional
Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for
details
fill_kwargs : dict
Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
plot_kwargs : dict
Keywords passed to HPD limits
ax : matplotlib axes
Returns
-------
ax : matplotlib axes
"""
# Default styling: invisible limit lines (alpha 0) over a half-opaque fill.
if plot_kwargs is None:
plot_kwargs = {}
plot_kwargs.setdefault("color", color)
plot_kwargs.setdefault("alpha", 0)
if fill_kwargs is None:
fill_kwargs = {}
fill_kwargs.setdefault("color", color)
fill_kwargs.setdefault("alpha", 0.5)
if ax is None:
ax = gca()
# hpd_ holds the lower/upper interval bounds computed from y.
hpd_ = hpd(y, credible_interval=credible_interval, circular=circular)
if smooth:
if smooth_kwargs is None:
smooth_kwargs = {}
# Savitzky-Golay defaults; window_length must stay <= the 200-point grid.
smooth_kwargs.setdefault("window_length", 55)
smooth_kwargs.setdefault("polyorder", 2)
# Interpolate the bounds onto a regular grid, then smooth column-wise.
x_data = np.linspace(x.min(), x.max(), 200)
hpd_interp = griddata(x, hpd_, x_data)
y_data = savgol_filter(hpd_interp, axis=0, **smooth_kwargs)
else:
# No smoothing: just sort by x so the plotted lines are monotone in x.
idx = np.argsort(x)
x_data = x[idx]
y_data = hpd_[idx]
ax.plot(x_data, y_data, **plot_kwargs)
# Fill between the lower (col 0) and upper (col 1) HPD bounds.
ax.fill_between(x_data, y_data[:, 0], y_data[:, 1], **fill_kwargs)
return ax | def function[plot_hpd, parameter[x, y, credible_interval, color, circular, smooth, smooth_kwargs, fill_kwargs, plot_kwargs, ax]]:
constant[
Plot hpd intervals for regression data.
Parameters
----------
x : array-like
Values to plot
y : array-like
values from which to compute the hpd
credible_interval : float, optional
Credible interval to plot. Defaults to 0.94.
color : str
Color used for the limits of the HPD interval and fill. Should be a valid matplotlib color
circular : bool, optional
Whether to compute the hpd taking into account `x` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
smooth : boolean
If True the result will be smoothed by first computing a linear interpolation of the data
over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.
Defaults to True.
smooth_kwargs : dict, optional
Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for
details
fill_kwargs : dict
Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
plot_kwargs : dict
Keywords passed to HPD limits
ax : matplotlib axes
Returns
-------
ax : matplotlib axes
]
if compare[name[plot_kwargs] is constant[None]] begin[:]
variable[plot_kwargs] assign[=] dictionary[[], []]
call[name[plot_kwargs].setdefault, parameter[constant[color], name[color]]]
call[name[plot_kwargs].setdefault, parameter[constant[alpha], constant[0]]]
if compare[name[fill_kwargs] is constant[None]] begin[:]
variable[fill_kwargs] assign[=] dictionary[[], []]
call[name[fill_kwargs].setdefault, parameter[constant[color], name[color]]]
call[name[fill_kwargs].setdefault, parameter[constant[alpha], constant[0.5]]]
if compare[name[ax] is constant[None]] begin[:]
variable[ax] assign[=] call[name[gca], parameter[]]
variable[hpd_] assign[=] call[name[hpd], parameter[name[y]]]
if name[smooth] begin[:]
if compare[name[smooth_kwargs] is constant[None]] begin[:]
variable[smooth_kwargs] assign[=] dictionary[[], []]
call[name[smooth_kwargs].setdefault, parameter[constant[window_length], constant[55]]]
call[name[smooth_kwargs].setdefault, parameter[constant[polyorder], constant[2]]]
variable[x_data] assign[=] call[name[np].linspace, parameter[call[name[x].min, parameter[]], call[name[x].max, parameter[]], constant[200]]]
variable[hpd_interp] assign[=] call[name[griddata], parameter[name[x], name[hpd_], name[x_data]]]
variable[y_data] assign[=] call[name[savgol_filter], parameter[name[hpd_interp]]]
call[name[ax].plot, parameter[name[x_data], name[y_data]]]
call[name[ax].fill_between, parameter[name[x_data], call[name[y_data]][tuple[[<ast.Slice object at 0x7da1b1bd6c20>, <ast.Constant object at 0x7da1b1bd6e00>]]], call[name[y_data]][tuple[[<ast.Slice object at 0x7da1b1bd6620>, <ast.Constant object at 0x7da1b1bd6230>]]]]]
return[name[ax]] | keyword[def] identifier[plot_hpd] (
identifier[x] ,
identifier[y] ,
identifier[credible_interval] = literal[int] ,
identifier[color] = literal[string] ,
identifier[circular] = keyword[False] ,
identifier[smooth] = keyword[True] ,
identifier[smooth_kwargs] = keyword[None] ,
identifier[fill_kwargs] = keyword[None] ,
identifier[plot_kwargs] = keyword[None] ,
identifier[ax] = keyword[None] ,
):
literal[string]
keyword[if] identifier[plot_kwargs] keyword[is] keyword[None] :
identifier[plot_kwargs] ={}
identifier[plot_kwargs] . identifier[setdefault] ( literal[string] , identifier[color] )
identifier[plot_kwargs] . identifier[setdefault] ( literal[string] , literal[int] )
keyword[if] identifier[fill_kwargs] keyword[is] keyword[None] :
identifier[fill_kwargs] ={}
identifier[fill_kwargs] . identifier[setdefault] ( literal[string] , identifier[color] )
identifier[fill_kwargs] . identifier[setdefault] ( literal[string] , literal[int] )
keyword[if] identifier[ax] keyword[is] keyword[None] :
identifier[ax] = identifier[gca] ()
identifier[hpd_] = identifier[hpd] ( identifier[y] , identifier[credible_interval] = identifier[credible_interval] , identifier[circular] = identifier[circular] )
keyword[if] identifier[smooth] :
keyword[if] identifier[smooth_kwargs] keyword[is] keyword[None] :
identifier[smooth_kwargs] ={}
identifier[smooth_kwargs] . identifier[setdefault] ( literal[string] , literal[int] )
identifier[smooth_kwargs] . identifier[setdefault] ( literal[string] , literal[int] )
identifier[x_data] = identifier[np] . identifier[linspace] ( identifier[x] . identifier[min] (), identifier[x] . identifier[max] (), literal[int] )
identifier[hpd_interp] = identifier[griddata] ( identifier[x] , identifier[hpd_] , identifier[x_data] )
identifier[y_data] = identifier[savgol_filter] ( identifier[hpd_interp] , identifier[axis] = literal[int] ,** identifier[smooth_kwargs] )
keyword[else] :
identifier[idx] = identifier[np] . identifier[argsort] ( identifier[x] )
identifier[x_data] = identifier[x] [ identifier[idx] ]
identifier[y_data] = identifier[hpd_] [ identifier[idx] ]
identifier[ax] . identifier[plot] ( identifier[x_data] , identifier[y_data] ,** identifier[plot_kwargs] )
identifier[ax] . identifier[fill_between] ( identifier[x_data] , identifier[y_data] [:, literal[int] ], identifier[y_data] [:, literal[int] ],** identifier[fill_kwargs] )
keyword[return] identifier[ax] | def plot_hpd(x, y, credible_interval=0.94, color='C1', circular=False, smooth=True, smooth_kwargs=None, fill_kwargs=None, plot_kwargs=None, ax=None):
"""
Plot hpd intervals for regression data.
Parameters
----------
x : array-like
Values to plot
y : array-like
values \u200b\u200bfrom which to compute the hpd
credible_interval : float, optional
Credible interval to plot. Defaults to 0.94.
color : str
Color used for the limits of the HPD interval and fill. Should be a valid matplotlib color
circular : bool, optional
Whether to compute the hpd taking into account `x` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
smooth : boolean
If True the result will be smoothed by first computing a linear interpolation of the data
over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.
Defaults to True.
smooth_kwargs : dict, optional
Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for
details
fill_kwargs : dict
Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
plot_kwargs : dict
Keywords passed to HPD limits
ax : matplotlib axes
Returns
-------
ax : matplotlib axes
"""
if plot_kwargs is None:
plot_kwargs = {} # depends on [control=['if'], data=['plot_kwargs']]
plot_kwargs.setdefault('color', color)
plot_kwargs.setdefault('alpha', 0)
if fill_kwargs is None:
fill_kwargs = {} # depends on [control=['if'], data=['fill_kwargs']]
fill_kwargs.setdefault('color', color)
fill_kwargs.setdefault('alpha', 0.5)
if ax is None:
ax = gca() # depends on [control=['if'], data=['ax']]
hpd_ = hpd(y, credible_interval=credible_interval, circular=circular)
if smooth:
if smooth_kwargs is None:
smooth_kwargs = {} # depends on [control=['if'], data=['smooth_kwargs']]
smooth_kwargs.setdefault('window_length', 55)
smooth_kwargs.setdefault('polyorder', 2)
x_data = np.linspace(x.min(), x.max(), 200)
hpd_interp = griddata(x, hpd_, x_data)
y_data = savgol_filter(hpd_interp, axis=0, **smooth_kwargs) # depends on [control=['if'], data=[]]
else:
idx = np.argsort(x)
x_data = x[idx]
y_data = hpd_[idx]
ax.plot(x_data, y_data, **plot_kwargs)
ax.fill_between(x_data, y_data[:, 0], y_data[:, 1], **fill_kwargs)
return ax |
def sport_update(self, sport_id, names=[], account=None, **kwargs):
""" Update a sport. This needs to be **proposed**.
:param str sport_id: The id of the sport to update
:param list names: Internationalized names, e.g. ``[['de', 'Foo'],
['en', 'bar']]``
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
"""
# NOTE(review): `names=[]` is a mutable default argument; harmless here
# because it is never mutated, but worth confirming upstream style.
assert isinstance(names, list)
if not account:
# Fall back to the configured default account when none is given.
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
# Resolve the account name and sport id into full chain objects.
account = Account(account)
sport = Sport(sport_id)
op = operations.Sport_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"sport_id": sport["id"],
"new_name": names,
"prefix": self.prefix,
}
)
# Sign/broadcast (or propose) the operation with the account's active key.
return self.finalizeOp(op, account["name"], "active", **kwargs) | def function[sport_update, parameter[self, sport_id, names, account]]:
constant[ Update a sport. This needs to be **proposed**.
:param str sport_id: The id of the sport to update
:param list names: Internationalized names, e.g. ``[['de', 'Foo'],
['en', 'bar']]``
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
]
assert[call[name[isinstance], parameter[name[names], name[list]]]]
if <ast.UnaryOp object at 0x7da1b102a680> begin[:]
if compare[constant[default_account] in name[self].config] begin[:]
variable[account] assign[=] call[name[self].config][constant[default_account]]
if <ast.UnaryOp object at 0x7da1b10284f0> begin[:]
<ast.Raise object at 0x7da1b1028f70>
variable[account] assign[=] call[name[Account], parameter[name[account]]]
variable[sport] assign[=] call[name[Sport], parameter[name[sport_id]]]
variable[op] assign[=] call[name[operations].Sport_update, parameter[]]
return[call[name[self].finalizeOp, parameter[name[op], call[name[account]][constant[name]], constant[active]]]] | keyword[def] identifier[sport_update] ( identifier[self] , identifier[sport_id] , identifier[names] =[], identifier[account] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[names] , identifier[list] )
keyword[if] keyword[not] identifier[account] :
keyword[if] literal[string] keyword[in] identifier[self] . identifier[config] :
identifier[account] = identifier[self] . identifier[config] [ literal[string] ]
keyword[if] keyword[not] identifier[account] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[account] = identifier[Account] ( identifier[account] )
identifier[sport] = identifier[Sport] ( identifier[sport_id] )
identifier[op] = identifier[operations] . identifier[Sport_update] (
**{
literal[string] :{ literal[string] : literal[int] , literal[string] : literal[string] },
literal[string] : identifier[sport] [ literal[string] ],
literal[string] : identifier[names] ,
literal[string] : identifier[self] . identifier[prefix] ,
}
)
keyword[return] identifier[self] . identifier[finalizeOp] ( identifier[op] , identifier[account] [ literal[string] ], literal[string] ,** identifier[kwargs] ) | def sport_update(self, sport_id, names=[], account=None, **kwargs):
""" Update a sport. This needs to be **proposed**.
:param str sport_id: The id of the sport to update
:param list names: Internationalized names, e.g. ``[['de', 'Foo'],
['en', 'bar']]``
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
"""
assert isinstance(names, list)
if not account:
if 'default_account' in self.config:
account = self.config['default_account'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not account:
raise ValueError('You need to provide an account') # depends on [control=['if'], data=[]]
account = Account(account)
sport = Sport(sport_id)
op = operations.Sport_update(**{'fee': {'amount': 0, 'asset_id': '1.3.0'}, 'sport_id': sport['id'], 'new_name': names, 'prefix': self.prefix})
return self.finalizeOp(op, account['name'], 'active', **kwargs) |
def _clip_gradient_op(dtype):
"""Create an op that clips gradients using a Defun.
The tensorflow Defun decorator creates an op and tensorflow caches these op
automatically according to `func_name`. Using a Defun decorator twice with the
same `func_name` does not create a new op, instead the cached op is used.
This method produces a new op the first time it is called with a given `dtype`
argument, and then uses the cached op each time it is called after that with
the same `dtype`. The min and max clip values are given as arguments for the
forward pass method so that they can be used in the backwards pass.
Args:
dtype: the dtype of the net whose gradient is being clipped.
Returns:
The op that clips gradients.
"""
# Backward pass: clip the incoming gradient to [min, max] taken from the
# forward op's 2nd and 3rd inputs; those inputs get no gradient (None).
def clip_gradient_backward(op, grad):
clip_value_min = op.inputs[1]
clip_value_max = op.inputs[2]
clipped_grad = tf.clip_by_value(grad, clip_value_min, clip_value_max)
return clipped_grad, None, None
# Forward pass is the identity; the clip bounds ride along only so the
# backward function can read them from op.inputs.
def clip_gradient_forward(x, clip_value_min, clip_value_max):
del clip_value_min # Unused.
del clip_value_max # Unused.
return x
# One cached op per dtype, keyed by name.
func_name = "ClipGradient_{}".format(dtype.name)
return function.Defun(
dtype, dtype, dtype,
python_grad_func=clip_gradient_backward,
func_name=func_name)(clip_gradient_forward) | def function[_clip_gradient_op, parameter[dtype]]:
constant[Create an op that clips gradients using a Defun.
The tensorflow Defun decorator creates an op and tensorflow caches these op
automatically according to `func_name`. Using a Defun decorator twice with the
same `func_name` does not create a new op, instead the cached op is used.
This method produces a new op the first time it is called with a given `dtype`
argument, and then uses the cached op each time it is called after that with
the same `dtype`. The min and max clip values are given as arguments for the
forward pass method so that they can be used in the backwards pass.
Args:
dtype: the dtype of the net whose gradient is being clipped.
Returns:
The op that clips gradients.
]
def function[clip_gradient_backward, parameter[op, grad]]:
variable[clip_value_min] assign[=] call[name[op].inputs][constant[1]]
variable[clip_value_max] assign[=] call[name[op].inputs][constant[2]]
variable[clipped_grad] assign[=] call[name[tf].clip_by_value, parameter[name[grad], name[clip_value_min], name[clip_value_max]]]
return[tuple[[<ast.Name object at 0x7da1b1c60730>, <ast.Constant object at 0x7da1b1c61840>, <ast.Constant object at 0x7da1b1c609a0>]]]
def function[clip_gradient_forward, parameter[x, clip_value_min, clip_value_max]]:
<ast.Delete object at 0x7da1b1c62560>
<ast.Delete object at 0x7da1b1c610c0>
return[name[x]]
variable[func_name] assign[=] call[constant[ClipGradient_{}].format, parameter[name[dtype].name]]
return[call[call[name[function].Defun, parameter[name[dtype], name[dtype], name[dtype]]], parameter[name[clip_gradient_forward]]]] | keyword[def] identifier[_clip_gradient_op] ( identifier[dtype] ):
literal[string]
keyword[def] identifier[clip_gradient_backward] ( identifier[op] , identifier[grad] ):
identifier[clip_value_min] = identifier[op] . identifier[inputs] [ literal[int] ]
identifier[clip_value_max] = identifier[op] . identifier[inputs] [ literal[int] ]
identifier[clipped_grad] = identifier[tf] . identifier[clip_by_value] ( identifier[grad] , identifier[clip_value_min] , identifier[clip_value_max] )
keyword[return] identifier[clipped_grad] , keyword[None] , keyword[None]
keyword[def] identifier[clip_gradient_forward] ( identifier[x] , identifier[clip_value_min] , identifier[clip_value_max] ):
keyword[del] identifier[clip_value_min]
keyword[del] identifier[clip_value_max]
keyword[return] identifier[x]
identifier[func_name] = literal[string] . identifier[format] ( identifier[dtype] . identifier[name] )
keyword[return] identifier[function] . identifier[Defun] (
identifier[dtype] , identifier[dtype] , identifier[dtype] ,
identifier[python_grad_func] = identifier[clip_gradient_backward] ,
identifier[func_name] = identifier[func_name] )( identifier[clip_gradient_forward] ) | def _clip_gradient_op(dtype):
"""Create an op that clips gradients using a Defun.
The tensorflow Defun decorator creates an op and tensorflow caches these op
automatically according to `func_name`. Using a Defun decorator twice with the
same `func_name` does not create a new op, instead the cached op is used.
This method produces a new op the first time it is called with a given `dtype`
argument, and then uses the cached op each time it is called after that with
the same `dtype`. The min and max clip values are given as arguments for the
forward pass method so that they can be used in the backwards pass.
Args:
dtype: the dtype of the net whose gradient is being clipped.
Returns:
The op that clips gradients.
"""
def clip_gradient_backward(op, grad):
clip_value_min = op.inputs[1]
clip_value_max = op.inputs[2]
clipped_grad = tf.clip_by_value(grad, clip_value_min, clip_value_max)
return (clipped_grad, None, None)
def clip_gradient_forward(x, clip_value_min, clip_value_max):
del clip_value_min # Unused.
del clip_value_max # Unused.
return x
func_name = 'ClipGradient_{}'.format(dtype.name)
return function.Defun(dtype, dtype, dtype, python_grad_func=clip_gradient_backward, func_name=func_name)(clip_gradient_forward) |
def _reverse(self):
""" reverse patch direction (this doesn't touch filenames) """
for p in self.items:
for h in p.hunks:
# Swap source/target start lines and line counts for each hunk.
h.startsrc, h.starttgt = h.starttgt, h.startsrc
h.linessrc, h.linestgt = h.linestgt, h.linessrc
# Flip '+' (addition) and '-' (removal) markers on every hunk line.
for i,line in enumerate(h.text):
# need to use line[0:1] here, because line[0]
# returns int instead of bytes on Python 3
if line[0:1] == b'+':
h.text[i] = b'-' + line[1:]
elif line[0:1] == b'-':
h.text[i] = b'+' +line[1:] | def function[_reverse, parameter[self]]:
constant[ reverse patch direction (this doesn't touch filenames) ]
for taget[name[p]] in starred[name[self].items] begin[:]
for taget[name[h]] in starred[name[p].hunks] begin[:]
<ast.Tuple object at 0x7da1b2207cd0> assign[=] tuple[[<ast.Attribute object at 0x7da1b2207bb0>, <ast.Attribute object at 0x7da1b2207b50>]]
<ast.Tuple object at 0x7da1b2256320> assign[=] tuple[[<ast.Attribute object at 0x7da1b2256440>, <ast.Attribute object at 0x7da1b2256410>]]
for taget[tuple[[<ast.Name object at 0x7da1b2256530>, <ast.Name object at 0x7da1b2256500>]]] in starred[call[name[enumerate], parameter[name[h].text]]] begin[:]
if compare[call[name[line]][<ast.Slice object at 0x7da1b2254640>] equal[==] constant[b'+']] begin[:]
call[name[h].text][name[i]] assign[=] binary_operation[constant[b'-'] + call[name[line]][<ast.Slice object at 0x7da1b2257af0>]] | keyword[def] identifier[_reverse] ( identifier[self] ):
literal[string]
keyword[for] identifier[p] keyword[in] identifier[self] . identifier[items] :
keyword[for] identifier[h] keyword[in] identifier[p] . identifier[hunks] :
identifier[h] . identifier[startsrc] , identifier[h] . identifier[starttgt] = identifier[h] . identifier[starttgt] , identifier[h] . identifier[startsrc]
identifier[h] . identifier[linessrc] , identifier[h] . identifier[linestgt] = identifier[h] . identifier[linestgt] , identifier[h] . identifier[linessrc]
keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[h] . identifier[text] ):
keyword[if] identifier[line] [ literal[int] : literal[int] ]== literal[string] :
identifier[h] . identifier[text] [ identifier[i] ]= literal[string] + identifier[line] [ literal[int] :]
keyword[elif] identifier[line] [ literal[int] : literal[int] ]== literal[string] :
identifier[h] . identifier[text] [ identifier[i] ]= literal[string] + identifier[line] [ literal[int] :] | def _reverse(self):
""" reverse patch direction (this doesn't touch filenames) """
for p in self.items:
for h in p.hunks:
(h.startsrc, h.starttgt) = (h.starttgt, h.startsrc)
(h.linessrc, h.linestgt) = (h.linestgt, h.linessrc)
for (i, line) in enumerate(h.text):
# need to use line[0:1] here, because line[0]
# returns int instead of bytes on Python 3
if line[0:1] == b'+':
h.text[i] = b'-' + line[1:] # depends on [control=['if'], data=[]]
elif line[0:1] == b'-':
h.text[i] = b'+' + line[1:] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['h']] # depends on [control=['for'], data=['p']] |
def copy_tier(self, eaf_obj, tier_name):
"""Copies a tier to another :class:`pympi.Elan.Eaf` object.
:param pympi.Elan.Eaf eaf_obj: Target Eaf object.
:param str tier_name: Name of the tier.
:raises KeyError: If the tier doesn't exist.
"""
# Replace any existing tier of the same name in the target object.
if tier_name in eaf_obj.get_tier_names():
eaf_obj.remove_tier(tier_name)
# Recreate the tier with this object's parameters, then copy each
# annotation (begin, end, value) over.
eaf_obj.add_tier(tier_name,
tier_dict=self.get_parameters_for_tier(tier_name))
for ann in self.get_annotation_data_for_tier(tier_name):
eaf_obj.insert_annotation(tier_name, ann[0], ann[1], ann[2]) | def function[copy_tier, parameter[self, eaf_obj, tier_name]]:
constant[Copies a tier to another :class:`pympi.Elan.Eaf` object.
:param pympi.Elan.Eaf eaf_obj: Target Eaf object.
:param str tier_name: Name of the tier.
:raises KeyError: If the tier doesn't exist.
]
if compare[name[tier_name] in call[name[eaf_obj].get_tier_names, parameter[]]] begin[:]
call[name[eaf_obj].remove_tier, parameter[name[tier_name]]]
call[name[eaf_obj].add_tier, parameter[name[tier_name]]]
for taget[name[ann]] in starred[call[name[self].get_annotation_data_for_tier, parameter[name[tier_name]]]] begin[:]
call[name[eaf_obj].insert_annotation, parameter[name[tier_name], call[name[ann]][constant[0]], call[name[ann]][constant[1]], call[name[ann]][constant[2]]]] | keyword[def] identifier[copy_tier] ( identifier[self] , identifier[eaf_obj] , identifier[tier_name] ):
literal[string]
keyword[if] identifier[tier_name] keyword[in] identifier[eaf_obj] . identifier[get_tier_names] ():
identifier[eaf_obj] . identifier[remove_tier] ( identifier[tier_name] )
identifier[eaf_obj] . identifier[add_tier] ( identifier[tier_name] ,
identifier[tier_dict] = identifier[self] . identifier[get_parameters_for_tier] ( identifier[tier_name] ))
keyword[for] identifier[ann] keyword[in] identifier[self] . identifier[get_annotation_data_for_tier] ( identifier[tier_name] ):
identifier[eaf_obj] . identifier[insert_annotation] ( identifier[tier_name] , identifier[ann] [ literal[int] ], identifier[ann] [ literal[int] ], identifier[ann] [ literal[int] ]) | def copy_tier(self, eaf_obj, tier_name):
"""Copies a tier to another :class:`pympi.Elan.Eaf` object.
:param pympi.Elan.Eaf eaf_obj: Target Eaf object.
:param str tier_name: Name of the tier.
:raises KeyError: If the tier doesn't exist.
"""
if tier_name in eaf_obj.get_tier_names():
eaf_obj.remove_tier(tier_name) # depends on [control=['if'], data=['tier_name']]
eaf_obj.add_tier(tier_name, tier_dict=self.get_parameters_for_tier(tier_name))
for ann in self.get_annotation_data_for_tier(tier_name):
eaf_obj.insert_annotation(tier_name, ann[0], ann[1], ann[2]) # depends on [control=['for'], data=['ann']] |
def _data2rec(schema, rec_data):
'''
schema = OrderedDict({
'prio': int,
'weight': int,
'port': to_port,
'name': str,
})
rec_data = '10 20 25 myawesome.nl'
res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'}
'''
try:
rec_fields = rec_data.split(' ')
# spaces in digest fields are allowed
assert len(rec_fields) >= len(schema)
if len(rec_fields) > len(schema):
# Fold all surplus fields back into the last schema column.
cutoff = len(schema) - 1
rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])]
if len(schema) == 1:
# Single-column schema: return the cast scalar, not a dict.
res = _cast(rec_fields[0], next(iter(schema.values())))
else:
# Pair each schema (name, caster) with its field, casting as we go.
res = dict((
(field_name, _cast(rec_field, rec_cast))
for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields)
))
return res
# Any parse/cast failure is re-raised as a single ValueError that names
# the offending data and the expected schema columns.
except (AssertionError, AttributeError, TypeError, ValueError) as e:
raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format(
rec_data,
e,
' '.join(schema.keys())
)) | def function[_data2rec, parameter[schema, rec_data]]:
constant[
schema = OrderedDict({
'prio': int,
'weight': int,
'port': to_port,
'name': str,
})
rec_data = '10 20 25 myawesome.nl'
res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'}
]
<ast.Try object at 0x7da1b208b4c0> | keyword[def] identifier[_data2rec] ( identifier[schema] , identifier[rec_data] ):
literal[string]
keyword[try] :
identifier[rec_fields] = identifier[rec_data] . identifier[split] ( literal[string] )
keyword[assert] identifier[len] ( identifier[rec_fields] )>= identifier[len] ( identifier[schema] )
keyword[if] identifier[len] ( identifier[rec_fields] )> identifier[len] ( identifier[schema] ):
identifier[cutoff] = identifier[len] ( identifier[schema] )- literal[int]
identifier[rec_fields] = identifier[rec_fields] [ literal[int] : identifier[cutoff] ]+[ literal[string] . identifier[join] ( identifier[rec_fields] [ identifier[cutoff] :])]
keyword[if] identifier[len] ( identifier[schema] )== literal[int] :
identifier[res] = identifier[_cast] ( identifier[rec_fields] [ literal[int] ], identifier[next] ( identifier[iter] ( identifier[schema] . identifier[values] ())))
keyword[else] :
identifier[res] = identifier[dict] ((
( identifier[field_name] , identifier[_cast] ( identifier[rec_field] , identifier[rec_cast] ))
keyword[for] ( identifier[field_name] , identifier[rec_cast] ), identifier[rec_field] keyword[in] identifier[zip] ( identifier[schema] . identifier[items] (), identifier[rec_fields] )
))
keyword[return] identifier[res]
keyword[except] ( identifier[AssertionError] , identifier[AttributeError] , identifier[TypeError] , identifier[ValueError] ) keyword[as] identifier[e] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[rec_data] ,
identifier[e] ,
literal[string] . identifier[join] ( identifier[schema] . identifier[keys] ())
)) | def _data2rec(schema, rec_data):
"""
schema = OrderedDict({
'prio': int,
'weight': int,
'port': to_port,
'name': str,
})
rec_data = '10 20 25 myawesome.nl'
res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'}
"""
try:
rec_fields = rec_data.split(' ')
# spaces in digest fields are allowed
assert len(rec_fields) >= len(schema)
if len(rec_fields) > len(schema):
cutoff = len(schema) - 1
rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] # depends on [control=['if'], data=[]]
if len(schema) == 1:
res = _cast(rec_fields[0], next(iter(schema.values()))) # depends on [control=['if'], data=[]]
else:
res = dict(((field_name, _cast(rec_field, rec_cast)) for ((field_name, rec_cast), rec_field) in zip(schema.items(), rec_fields)))
return res # depends on [control=['try'], data=[]]
except (AssertionError, AttributeError, TypeError, ValueError) as e:
raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format(rec_data, e, ' '.join(schema.keys()))) # depends on [control=['except'], data=['e']] |
def _default_docx_path():
"""
Return the path to the built-in default .docx package.
"""
_thisdir = os.path.split(__file__)[0]
return os.path.join(_thisdir, 'templates', 'default-docx-template') | def function[_default_docx_path, parameter[]]:
constant[
Return the path to the built-in default .docx package.
]
variable[_thisdir] assign[=] call[call[name[os].path.split, parameter[name[__file__]]]][constant[0]]
return[call[name[os].path.join, parameter[name[_thisdir], constant[templates], constant[default-docx-template]]]] | keyword[def] identifier[_default_docx_path] ():
literal[string]
identifier[_thisdir] = identifier[os] . identifier[path] . identifier[split] ( identifier[__file__] )[ literal[int] ]
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[_thisdir] , literal[string] , literal[string] ) | def _default_docx_path():
"""
Return the path to the built-in default .docx package.
"""
_thisdir = os.path.split(__file__)[0]
return os.path.join(_thisdir, 'templates', 'default-docx-template') |
def get_module_by_name(self, modName):
"""
@type modName: int
@param modName:
Name of the module to look for, as returned by L{Module.get_name}.
If two or more modules with the same name are loaded, only one
of the matching modules is returned.
You can also pass a full pathname to the DLL file.
This works correctly even if two modules with the same name
are loaded from different paths.
@rtype: L{Module}
@return: C{Module} object that best matches the given name.
Returns C{None} if no C{Module} can be found.
"""
# Convert modName to lowercase.
# This helps make case insensitive string comparisons.
modName = modName.lower()
# modName is an absolute pathname.
if PathOperations.path_is_absolute(modName):
for lib in self.iter_modules():
if modName == lib.get_filename().lower():
return lib
return None # Stop trying to match the name.
# Get all the module names.
# This prevents having to iterate through the module list
# more than once.
modDict = [ ( lib.get_name(), lib ) for lib in self.iter_modules() ]
modDict = dict(modDict)
# modName is a base filename.
if modName in modDict:
return modDict[modName]
# modName is a base filename without extension.
filepart, extpart = PathOperations.split_extension(modName)
if filepart and extpart:
if filepart in modDict:
return modDict[filepart]
# modName is a base address.
try:
baseAddress = HexInput.integer(modName)
except ValueError:
return None
if self.has_module(baseAddress):
return self.get_module(baseAddress)
# Module not found.
return None | def function[get_module_by_name, parameter[self, modName]]:
constant[
@type modName: int
@param modName:
Name of the module to look for, as returned by L{Module.get_name}.
If two or more modules with the same name are loaded, only one
of the matching modules is returned.
You can also pass a full pathname to the DLL file.
This works correctly even if two modules with the same name
are loaded from different paths.
@rtype: L{Module}
@return: C{Module} object that best matches the given name.
Returns C{None} if no C{Module} can be found.
]
variable[modName] assign[=] call[name[modName].lower, parameter[]]
if call[name[PathOperations].path_is_absolute, parameter[name[modName]]] begin[:]
for taget[name[lib]] in starred[call[name[self].iter_modules, parameter[]]] begin[:]
if compare[name[modName] equal[==] call[call[name[lib].get_filename, parameter[]].lower, parameter[]]] begin[:]
return[name[lib]]
return[constant[None]]
variable[modDict] assign[=] <ast.ListComp object at 0x7da1b06fa710>
variable[modDict] assign[=] call[name[dict], parameter[name[modDict]]]
if compare[name[modName] in name[modDict]] begin[:]
return[call[name[modDict]][name[modName]]]
<ast.Tuple object at 0x7da1b06fb940> assign[=] call[name[PathOperations].split_extension, parameter[name[modName]]]
if <ast.BoolOp object at 0x7da1b06face0> begin[:]
if compare[name[filepart] in name[modDict]] begin[:]
return[call[name[modDict]][name[filepart]]]
<ast.Try object at 0x7da1b06fba00>
if call[name[self].has_module, parameter[name[baseAddress]]] begin[:]
return[call[name[self].get_module, parameter[name[baseAddress]]]]
return[constant[None]] | keyword[def] identifier[get_module_by_name] ( identifier[self] , identifier[modName] ):
literal[string]
identifier[modName] = identifier[modName] . identifier[lower] ()
keyword[if] identifier[PathOperations] . identifier[path_is_absolute] ( identifier[modName] ):
keyword[for] identifier[lib] keyword[in] identifier[self] . identifier[iter_modules] ():
keyword[if] identifier[modName] == identifier[lib] . identifier[get_filename] (). identifier[lower] ():
keyword[return] identifier[lib]
keyword[return] keyword[None]
identifier[modDict] =[( identifier[lib] . identifier[get_name] (), identifier[lib] ) keyword[for] identifier[lib] keyword[in] identifier[self] . identifier[iter_modules] ()]
identifier[modDict] = identifier[dict] ( identifier[modDict] )
keyword[if] identifier[modName] keyword[in] identifier[modDict] :
keyword[return] identifier[modDict] [ identifier[modName] ]
identifier[filepart] , identifier[extpart] = identifier[PathOperations] . identifier[split_extension] ( identifier[modName] )
keyword[if] identifier[filepart] keyword[and] identifier[extpart] :
keyword[if] identifier[filepart] keyword[in] identifier[modDict] :
keyword[return] identifier[modDict] [ identifier[filepart] ]
keyword[try] :
identifier[baseAddress] = identifier[HexInput] . identifier[integer] ( identifier[modName] )
keyword[except] identifier[ValueError] :
keyword[return] keyword[None]
keyword[if] identifier[self] . identifier[has_module] ( identifier[baseAddress] ):
keyword[return] identifier[self] . identifier[get_module] ( identifier[baseAddress] )
keyword[return] keyword[None] | def get_module_by_name(self, modName):
"""
@type modName: int
@param modName:
Name of the module to look for, as returned by L{Module.get_name}.
If two or more modules with the same name are loaded, only one
of the matching modules is returned.
You can also pass a full pathname to the DLL file.
This works correctly even if two modules with the same name
are loaded from different paths.
@rtype: L{Module}
@return: C{Module} object that best matches the given name.
Returns C{None} if no C{Module} can be found.
"""
# Convert modName to lowercase.
# This helps make case insensitive string comparisons.
modName = modName.lower()
# modName is an absolute pathname.
if PathOperations.path_is_absolute(modName):
for lib in self.iter_modules():
if modName == lib.get_filename().lower():
return lib # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['lib']]
return None # Stop trying to match the name. # depends on [control=['if'], data=[]]
# Get all the module names.
# This prevents having to iterate through the module list
# more than once.
modDict = [(lib.get_name(), lib) for lib in self.iter_modules()]
modDict = dict(modDict)
# modName is a base filename.
if modName in modDict:
return modDict[modName] # depends on [control=['if'], data=['modName', 'modDict']]
# modName is a base filename without extension.
(filepart, extpart) = PathOperations.split_extension(modName)
if filepart and extpart:
if filepart in modDict:
return modDict[filepart] # depends on [control=['if'], data=['filepart', 'modDict']] # depends on [control=['if'], data=[]]
# modName is a base address.
try:
baseAddress = HexInput.integer(modName) # depends on [control=['try'], data=[]]
except ValueError:
return None # depends on [control=['except'], data=[]]
if self.has_module(baseAddress):
return self.get_module(baseAddress) # depends on [control=['if'], data=[]]
# Module not found.
return None |
def text_changed(self):
"""Text has changed"""
# Save text as bytes, if it was initially bytes
if self.is_binary:
self.text = to_binary_string(self.edit.toPlainText(), 'utf8')
else:
self.text = to_text_string(self.edit.toPlainText())
if self.btn_save_and_close:
self.btn_save_and_close.setEnabled(True)
self.btn_save_and_close.setAutoDefault(True)
self.btn_save_and_close.setDefault(True) | def function[text_changed, parameter[self]]:
constant[Text has changed]
if name[self].is_binary begin[:]
name[self].text assign[=] call[name[to_binary_string], parameter[call[name[self].edit.toPlainText, parameter[]], constant[utf8]]]
if name[self].btn_save_and_close begin[:]
call[name[self].btn_save_and_close.setEnabled, parameter[constant[True]]]
call[name[self].btn_save_and_close.setAutoDefault, parameter[constant[True]]]
call[name[self].btn_save_and_close.setDefault, parameter[constant[True]]] | keyword[def] identifier[text_changed] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[is_binary] :
identifier[self] . identifier[text] = identifier[to_binary_string] ( identifier[self] . identifier[edit] . identifier[toPlainText] (), literal[string] )
keyword[else] :
identifier[self] . identifier[text] = identifier[to_text_string] ( identifier[self] . identifier[edit] . identifier[toPlainText] ())
keyword[if] identifier[self] . identifier[btn_save_and_close] :
identifier[self] . identifier[btn_save_and_close] . identifier[setEnabled] ( keyword[True] )
identifier[self] . identifier[btn_save_and_close] . identifier[setAutoDefault] ( keyword[True] )
identifier[self] . identifier[btn_save_and_close] . identifier[setDefault] ( keyword[True] ) | def text_changed(self):
"""Text has changed""" # Save text as bytes, if it was initially bytes
if self.is_binary:
self.text = to_binary_string(self.edit.toPlainText(), 'utf8') # depends on [control=['if'], data=[]]
else:
self.text = to_text_string(self.edit.toPlainText())
if self.btn_save_and_close:
self.btn_save_and_close.setEnabled(True)
self.btn_save_and_close.setAutoDefault(True)
self.btn_save_and_close.setDefault(True) # depends on [control=['if'], data=[]] |
def delete_keyvault(access_token, subscription_id, rgname, vault_name):
'''Deletes a key vault in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
vault_name (str): Name of the new key vault.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults/', vault_name,
'?api-version=', KEYVAULT_API])
return do_delete(endpoint, access_token) | def function[delete_keyvault, parameter[access_token, subscription_id, rgname, vault_name]]:
constant[Deletes a key vault in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
vault_name (str): Name of the new key vault.
Returns:
HTTP response. 200 OK.
]
variable[endpoint] assign[=] call[constant[].join, parameter[list[[<ast.Call object at 0x7da1b0534160>, <ast.Constant object at 0x7da1b0535c00>, <ast.Name object at 0x7da1b0534d90>, <ast.Constant object at 0x7da1b05376a0>, <ast.Name object at 0x7da1b0535420>, <ast.Constant object at 0x7da1b0536980>, <ast.Name object at 0x7da1b0534a00>, <ast.Constant object at 0x7da1b0536e60>, <ast.Name object at 0x7da1b054a890>]]]]
return[call[name[do_delete], parameter[name[endpoint], name[access_token]]]] | keyword[def] identifier[delete_keyvault] ( identifier[access_token] , identifier[subscription_id] , identifier[rgname] , identifier[vault_name] ):
literal[string]
identifier[endpoint] = literal[string] . identifier[join] ([ identifier[get_rm_endpoint] (),
literal[string] , identifier[subscription_id] ,
literal[string] , identifier[rgname] ,
literal[string] , identifier[vault_name] ,
literal[string] , identifier[KEYVAULT_API] ])
keyword[return] identifier[do_delete] ( identifier[endpoint] , identifier[access_token] ) | def delete_keyvault(access_token, subscription_id, rgname, vault_name):
"""Deletes a key vault in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
vault_name (str): Name of the new key vault.
Returns:
HTTP response. 200 OK.
"""
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults/', vault_name, '?api-version=', KEYVAULT_API])
return do_delete(endpoint, access_token) |
def delete(self, **kwargs):
"""
Performs a DELETE statement on the model's table in the master database.
:param where: The WHERE clause. This can be a plain string, a dict or an array.
:type where: string, dict, array
"""
kwargs['stack'] = self.stack_mark(inspect.stack())
return self.db_adapter(role='master').delete(**kwargs) | def function[delete, parameter[self]]:
constant[
Performs a DELETE statement on the model's table in the master database.
:param where: The WHERE clause. This can be a plain string, a dict or an array.
:type where: string, dict, array
]
call[name[kwargs]][constant[stack]] assign[=] call[name[self].stack_mark, parameter[call[name[inspect].stack, parameter[]]]]
return[call[call[name[self].db_adapter, parameter[]].delete, parameter[]]] | keyword[def] identifier[delete] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[stack_mark] ( identifier[inspect] . identifier[stack] ())
keyword[return] identifier[self] . identifier[db_adapter] ( identifier[role] = literal[string] ). identifier[delete] (** identifier[kwargs] ) | def delete(self, **kwargs):
"""
Performs a DELETE statement on the model's table in the master database.
:param where: The WHERE clause. This can be a plain string, a dict or an array.
:type where: string, dict, array
"""
kwargs['stack'] = self.stack_mark(inspect.stack())
return self.db_adapter(role='master').delete(**kwargs) |
def as_uni_errors(form):
"""
Renders only form errors like django-uni-form::
{% load uni_form_tags %}
{{ form|as_uni_errors }}
"""
if isinstance(form, BaseFormSet):
template = get_template('uni_form/errors_formset.html')
c = Context({'formset': form})
else:
template = get_template('uni_form/errors.html')
c = Context({'form':form})
return template.render(c) | def function[as_uni_errors, parameter[form]]:
constant[
Renders only form errors like django-uni-form::
{% load uni_form_tags %}
{{ form|as_uni_errors }}
]
if call[name[isinstance], parameter[name[form], name[BaseFormSet]]] begin[:]
variable[template] assign[=] call[name[get_template], parameter[constant[uni_form/errors_formset.html]]]
variable[c] assign[=] call[name[Context], parameter[dictionary[[<ast.Constant object at 0x7da20c991ed0>], [<ast.Name object at 0x7da20c9921a0>]]]]
return[call[name[template].render, parameter[name[c]]]] | keyword[def] identifier[as_uni_errors] ( identifier[form] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[form] , identifier[BaseFormSet] ):
identifier[template] = identifier[get_template] ( literal[string] )
identifier[c] = identifier[Context] ({ literal[string] : identifier[form] })
keyword[else] :
identifier[template] = identifier[get_template] ( literal[string] )
identifier[c] = identifier[Context] ({ literal[string] : identifier[form] })
keyword[return] identifier[template] . identifier[render] ( identifier[c] ) | def as_uni_errors(form):
"""
Renders only form errors like django-uni-form::
{% load uni_form_tags %}
{{ form|as_uni_errors }}
"""
if isinstance(form, BaseFormSet):
template = get_template('uni_form/errors_formset.html')
c = Context({'formset': form}) # depends on [control=['if'], data=[]]
else:
template = get_template('uni_form/errors.html')
c = Context({'form': form})
return template.render(c) |
def getArc(self, prom, sig):
""" Returns the arcs between a promissor and
a significator. Should uses the object creation
functions to build the objects.
"""
res = self._arc(prom, sig)
res.update({
'prom': prom['id'],
'sig': sig['id']
})
return res | def function[getArc, parameter[self, prom, sig]]:
constant[ Returns the arcs between a promissor and
a significator. Should uses the object creation
functions to build the objects.
]
variable[res] assign[=] call[name[self]._arc, parameter[name[prom], name[sig]]]
call[name[res].update, parameter[dictionary[[<ast.Constant object at 0x7da1b11a6e60>, <ast.Constant object at 0x7da1b11dcfd0>], [<ast.Subscript object at 0x7da1b11dff10>, <ast.Subscript object at 0x7da1b11e3a60>]]]]
return[name[res]] | keyword[def] identifier[getArc] ( identifier[self] , identifier[prom] , identifier[sig] ):
literal[string]
identifier[res] = identifier[self] . identifier[_arc] ( identifier[prom] , identifier[sig] )
identifier[res] . identifier[update] ({
literal[string] : identifier[prom] [ literal[string] ],
literal[string] : identifier[sig] [ literal[string] ]
})
keyword[return] identifier[res] | def getArc(self, prom, sig):
""" Returns the arcs between a promissor and
a significator. Should uses the object creation
functions to build the objects.
"""
res = self._arc(prom, sig)
res.update({'prom': prom['id'], 'sig': sig['id']})
return res |
def _progress_update(self, numerator_increment, stage=0, show_eta=True, **kw):
""" Updates the progress. Will update progress bars or other progress output.
Parameters
----------
numerator : int
numerator of partial work done already in current stage
stage : int, nonnegative, default=0
Current stage of the algorithm, 0 or greater
"""
if not self.show_progress:
return
self.__check_stage_registered(stage)
if not self._prog_rep_progressbars[stage]:
return
pg = self._prog_rep_progressbars[stage]
pg.update(int(numerator_increment)) | def function[_progress_update, parameter[self, numerator_increment, stage, show_eta]]:
constant[ Updates the progress. Will update progress bars or other progress output.
Parameters
----------
numerator : int
numerator of partial work done already in current stage
stage : int, nonnegative, default=0
Current stage of the algorithm, 0 or greater
]
if <ast.UnaryOp object at 0x7da2045675e0> begin[:]
return[None]
call[name[self].__check_stage_registered, parameter[name[stage]]]
if <ast.UnaryOp object at 0x7da204565180> begin[:]
return[None]
variable[pg] assign[=] call[name[self]._prog_rep_progressbars][name[stage]]
call[name[pg].update, parameter[call[name[int], parameter[name[numerator_increment]]]]] | keyword[def] identifier[_progress_update] ( identifier[self] , identifier[numerator_increment] , identifier[stage] = literal[int] , identifier[show_eta] = keyword[True] ,** identifier[kw] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[show_progress] :
keyword[return]
identifier[self] . identifier[__check_stage_registered] ( identifier[stage] )
keyword[if] keyword[not] identifier[self] . identifier[_prog_rep_progressbars] [ identifier[stage] ]:
keyword[return]
identifier[pg] = identifier[self] . identifier[_prog_rep_progressbars] [ identifier[stage] ]
identifier[pg] . identifier[update] ( identifier[int] ( identifier[numerator_increment] )) | def _progress_update(self, numerator_increment, stage=0, show_eta=True, **kw):
""" Updates the progress. Will update progress bars or other progress output.
Parameters
----------
numerator : int
numerator of partial work done already in current stage
stage : int, nonnegative, default=0
Current stage of the algorithm, 0 or greater
"""
if not self.show_progress:
return # depends on [control=['if'], data=[]]
self.__check_stage_registered(stage)
if not self._prog_rep_progressbars[stage]:
return # depends on [control=['if'], data=[]]
pg = self._prog_rep_progressbars[stage]
pg.update(int(numerator_increment)) |
def _single_qubit_accumulate_into_scratch(args: Dict[str, Any]):
"""Accumulates single qubit phase gates into the scratch shards."""
index = args['indices'][0]
shard_num = args['shard_num']
half_turns = args['half_turns']
num_shard_qubits = args['num_shard_qubits']
scratch = _scratch_shard(args)
# ExpZ = exp(-i pi Z half_turns / 2).
if index >= num_shard_qubits:
# Acts on prefix qubits.
sign = 1 - 2 * _kth_bit(shard_num, index - num_shard_qubits)
scratch -= half_turns * sign
else:
# Acts on shard qubits.
scratch -= half_turns * _pm_vects(args)[index] | def function[_single_qubit_accumulate_into_scratch, parameter[args]]:
constant[Accumulates single qubit phase gates into the scratch shards.]
variable[index] assign[=] call[call[name[args]][constant[indices]]][constant[0]]
variable[shard_num] assign[=] call[name[args]][constant[shard_num]]
variable[half_turns] assign[=] call[name[args]][constant[half_turns]]
variable[num_shard_qubits] assign[=] call[name[args]][constant[num_shard_qubits]]
variable[scratch] assign[=] call[name[_scratch_shard], parameter[name[args]]]
if compare[name[index] greater_or_equal[>=] name[num_shard_qubits]] begin[:]
variable[sign] assign[=] binary_operation[constant[1] - binary_operation[constant[2] * call[name[_kth_bit], parameter[name[shard_num], binary_operation[name[index] - name[num_shard_qubits]]]]]]
<ast.AugAssign object at 0x7da1b1cef130> | keyword[def] identifier[_single_qubit_accumulate_into_scratch] ( identifier[args] : identifier[Dict] [ identifier[str] , identifier[Any] ]):
literal[string]
identifier[index] = identifier[args] [ literal[string] ][ literal[int] ]
identifier[shard_num] = identifier[args] [ literal[string] ]
identifier[half_turns] = identifier[args] [ literal[string] ]
identifier[num_shard_qubits] = identifier[args] [ literal[string] ]
identifier[scratch] = identifier[_scratch_shard] ( identifier[args] )
keyword[if] identifier[index] >= identifier[num_shard_qubits] :
identifier[sign] = literal[int] - literal[int] * identifier[_kth_bit] ( identifier[shard_num] , identifier[index] - identifier[num_shard_qubits] )
identifier[scratch] -= identifier[half_turns] * identifier[sign]
keyword[else] :
identifier[scratch] -= identifier[half_turns] * identifier[_pm_vects] ( identifier[args] )[ identifier[index] ] | def _single_qubit_accumulate_into_scratch(args: Dict[str, Any]):
"""Accumulates single qubit phase gates into the scratch shards."""
index = args['indices'][0]
shard_num = args['shard_num']
half_turns = args['half_turns']
num_shard_qubits = args['num_shard_qubits']
scratch = _scratch_shard(args)
# ExpZ = exp(-i pi Z half_turns / 2).
if index >= num_shard_qubits:
# Acts on prefix qubits.
sign = 1 - 2 * _kth_bit(shard_num, index - num_shard_qubits)
scratch -= half_turns * sign # depends on [control=['if'], data=['index', 'num_shard_qubits']]
else:
# Acts on shard qubits.
scratch -= half_turns * _pm_vects(args)[index] |
def _unassembled_reads2_out_file_name(self):
"""Checks if file name is set for reads2 output.
Returns absolute path."""
if self.Parameters['-2'].isOn():
unassembled_reads2 = self._absolute(
str(self.Parameters['-2'].Value))
else:
raise ValueError("No reads2 (flag -2) output path specified")
return unassembled_reads2 | def function[_unassembled_reads2_out_file_name, parameter[self]]:
constant[Checks if file name is set for reads2 output.
Returns absolute path.]
if call[call[name[self].Parameters][constant[-2]].isOn, parameter[]] begin[:]
variable[unassembled_reads2] assign[=] call[name[self]._absolute, parameter[call[name[str], parameter[call[name[self].Parameters][constant[-2]].Value]]]]
return[name[unassembled_reads2]] | keyword[def] identifier[_unassembled_reads2_out_file_name] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[Parameters] [ literal[string] ]. identifier[isOn] ():
identifier[unassembled_reads2] = identifier[self] . identifier[_absolute] (
identifier[str] ( identifier[self] . identifier[Parameters] [ literal[string] ]. identifier[Value] ))
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[unassembled_reads2] | def _unassembled_reads2_out_file_name(self):
"""Checks if file name is set for reads2 output.
Returns absolute path."""
if self.Parameters['-2'].isOn():
unassembled_reads2 = self._absolute(str(self.Parameters['-2'].Value)) # depends on [control=['if'], data=[]]
else:
raise ValueError('No reads2 (flag -2) output path specified')
return unassembled_reads2 |
def point_in_multipolygon(point, multipoly):
"""
valid whether the point is located in a mulitpolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
"""
coords_array = [multipoly['coordinates']] if multipoly[
'type'] == "MultiPolygon" else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True
return False | def function[point_in_multipolygon, parameter[point, multipoly]]:
constant[
valid whether the point is located in a mulitpolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
]
variable[coords_array] assign[=] <ast.IfExp object at 0x7da1b1004790>
for taget[name[coords]] in starred[name[coords_array]] begin[:]
if call[name[_point_in_polygon], parameter[name[point], name[coords]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[point_in_multipolygon] ( identifier[point] , identifier[multipoly] ):
literal[string]
identifier[coords_array] =[ identifier[multipoly] [ literal[string] ]] keyword[if] identifier[multipoly] [
literal[string] ]== literal[string] keyword[else] identifier[multipoly] [ literal[string] ]
keyword[for] identifier[coords] keyword[in] identifier[coords_array] :
keyword[if] identifier[_point_in_polygon] ( identifier[point] , identifier[coords] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def point_in_multipolygon(point, multipoly):
"""
valid whether the point is located in a mulitpolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
"""
coords_array = [multipoly['coordinates']] if multipoly['type'] == 'MultiPolygon' else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['coords']]
return False |
def add(envelope):
""" Take a dict-like fedmsg envelope and store the headers and message
in the table.
"""
message = envelope['body']
timestamp = message.get('timestamp', None)
try:
if timestamp:
timestamp = datetime.datetime.utcfromtimestamp(timestamp)
else:
timestamp = datetime.datetime.utcnow()
except Exception:
pass
headers = envelope.get('headers', None)
msg_id = message.get('msg_id', None)
if not msg_id and headers:
msg_id = headers.get('message-id', None)
if not msg_id:
msg_id = six.text_type(timestamp.year) + six.u('-') + six.text_type(uuid.uuid4())
obj = Message(
i=message.get('i', 0),
msg_id=msg_id,
topic=message['topic'],
timestamp=timestamp,
username=message.get('username', None),
crypto=message.get('crypto', None),
certificate=message.get('certificate', None),
signature=message.get('signature', None),
)
obj.msg = message['msg']
obj.headers = headers
try:
session.add(obj)
session.flush()
except IntegrityError:
log.warning('Skipping message from %s with duplicate id: %s',
message['topic'], msg_id)
session.rollback()
return
usernames = fedmsg.meta.msg2usernames(message)
packages = fedmsg.meta.msg2packages(message)
# Do a little sanity checking on fedmsg.meta results
if None in usernames:
# Notify developers so they can fix msg2usernames
log.error('NoneType found in usernames of %r' % msg_id)
# And prune out the bad value
usernames = [name for name in usernames if name is not None]
if None in packages:
# Notify developers so they can fix msg2packages
log.error('NoneType found in packages of %r' % msg_id)
# And prune out the bad value
packages = [pkg for pkg in packages if pkg is not None]
# If we've never seen one of these users before, then:
# 1) make sure they exist in the db (create them if necessary)
# 2) mark an in memory cache so we can remember that they exist without
# having to hit the db.
for username in usernames:
if username not in _users_seen:
# Create the user in the DB if necessary
User.get_or_create(username)
# Then just mark an in memory cache noting that we've seen them.
_users_seen.add(username)
for package in packages:
if package not in _packages_seen:
Package.get_or_create(package)
_packages_seen.add(package)
session.flush()
# These two blocks would normally be a simple "obj.users.append(user)" kind
# of statement, but here we drop down out of sqlalchemy's ORM and into the
# sql abstraction in order to gain a little performance boost.
values = [{'username': username, 'msg': obj.id} for username in usernames]
if values:
session.execute(user_assoc_table.insert(), values)
values = [{'package': package, 'msg': obj.id} for package in packages]
if values:
session.execute(pack_assoc_table.insert(), values)
# TODO -- can we avoid committing every time?
session.flush()
session.commit() | def function[add, parameter[envelope]]:
constant[ Take a dict-like fedmsg envelope and store the headers and message
in the table.
]
variable[message] assign[=] call[name[envelope]][constant[body]]
variable[timestamp] assign[=] call[name[message].get, parameter[constant[timestamp], constant[None]]]
<ast.Try object at 0x7da1b1dc3b50>
variable[headers] assign[=] call[name[envelope].get, parameter[constant[headers], constant[None]]]
variable[msg_id] assign[=] call[name[message].get, parameter[constant[msg_id], constant[None]]]
if <ast.BoolOp object at 0x7da1b1dc34c0> begin[:]
variable[msg_id] assign[=] call[name[headers].get, parameter[constant[message-id], constant[None]]]
if <ast.UnaryOp object at 0x7da1b1dc3280> begin[:]
variable[msg_id] assign[=] binary_operation[binary_operation[call[name[six].text_type, parameter[name[timestamp].year]] + call[name[six].u, parameter[constant[-]]]] + call[name[six].text_type, parameter[call[name[uuid].uuid4, parameter[]]]]]
variable[obj] assign[=] call[name[Message], parameter[]]
name[obj].msg assign[=] call[name[message]][constant[msg]]
name[obj].headers assign[=] name[headers]
<ast.Try object at 0x7da1b1dc24d0>
variable[usernames] assign[=] call[name[fedmsg].meta.msg2usernames, parameter[name[message]]]
variable[packages] assign[=] call[name[fedmsg].meta.msg2packages, parameter[name[message]]]
if compare[constant[None] in name[usernames]] begin[:]
call[name[log].error, parameter[binary_operation[constant[NoneType found in usernames of %r] <ast.Mod object at 0x7da2590d6920> name[msg_id]]]]
variable[usernames] assign[=] <ast.ListComp object at 0x7da1b1dc19c0>
if compare[constant[None] in name[packages]] begin[:]
call[name[log].error, parameter[binary_operation[constant[NoneType found in packages of %r] <ast.Mod object at 0x7da2590d6920> name[msg_id]]]]
variable[packages] assign[=] <ast.ListComp object at 0x7da1b1dc1570>
for taget[name[username]] in starred[name[usernames]] begin[:]
if compare[name[username] <ast.NotIn object at 0x7da2590d7190> name[_users_seen]] begin[:]
call[name[User].get_or_create, parameter[name[username]]]
call[name[_users_seen].add, parameter[name[username]]]
for taget[name[package]] in starred[name[packages]] begin[:]
if compare[name[package] <ast.NotIn object at 0x7da2590d7190> name[_packages_seen]] begin[:]
call[name[Package].get_or_create, parameter[name[package]]]
call[name[_packages_seen].add, parameter[name[package]]]
call[name[session].flush, parameter[]]
variable[values] assign[=] <ast.ListComp object at 0x7da1b1dc0b50>
if name[values] begin[:]
call[name[session].execute, parameter[call[name[user_assoc_table].insert, parameter[]], name[values]]]
variable[values] assign[=] <ast.ListComp object at 0x7da1b1dc06d0>
if name[values] begin[:]
call[name[session].execute, parameter[call[name[pack_assoc_table].insert, parameter[]], name[values]]]
call[name[session].flush, parameter[]]
call[name[session].commit, parameter[]] | keyword[def] identifier[add] ( identifier[envelope] ):
literal[string]
identifier[message] = identifier[envelope] [ literal[string] ]
identifier[timestamp] = identifier[message] . identifier[get] ( literal[string] , keyword[None] )
keyword[try] :
keyword[if] identifier[timestamp] :
identifier[timestamp] = identifier[datetime] . identifier[datetime] . identifier[utcfromtimestamp] ( identifier[timestamp] )
keyword[else] :
identifier[timestamp] = identifier[datetime] . identifier[datetime] . identifier[utcnow] ()
keyword[except] identifier[Exception] :
keyword[pass]
identifier[headers] = identifier[envelope] . identifier[get] ( literal[string] , keyword[None] )
identifier[msg_id] = identifier[message] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[msg_id] keyword[and] identifier[headers] :
identifier[msg_id] = identifier[headers] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[msg_id] :
identifier[msg_id] = identifier[six] . identifier[text_type] ( identifier[timestamp] . identifier[year] )+ identifier[six] . identifier[u] ( literal[string] )+ identifier[six] . identifier[text_type] ( identifier[uuid] . identifier[uuid4] ())
identifier[obj] = identifier[Message] (
identifier[i] = identifier[message] . identifier[get] ( literal[string] , literal[int] ),
identifier[msg_id] = identifier[msg_id] ,
identifier[topic] = identifier[message] [ literal[string] ],
identifier[timestamp] = identifier[timestamp] ,
identifier[username] = identifier[message] . identifier[get] ( literal[string] , keyword[None] ),
identifier[crypto] = identifier[message] . identifier[get] ( literal[string] , keyword[None] ),
identifier[certificate] = identifier[message] . identifier[get] ( literal[string] , keyword[None] ),
identifier[signature] = identifier[message] . identifier[get] ( literal[string] , keyword[None] ),
)
identifier[obj] . identifier[msg] = identifier[message] [ literal[string] ]
identifier[obj] . identifier[headers] = identifier[headers]
keyword[try] :
identifier[session] . identifier[add] ( identifier[obj] )
identifier[session] . identifier[flush] ()
keyword[except] identifier[IntegrityError] :
identifier[log] . identifier[warning] ( literal[string] ,
identifier[message] [ literal[string] ], identifier[msg_id] )
identifier[session] . identifier[rollback] ()
keyword[return]
identifier[usernames] = identifier[fedmsg] . identifier[meta] . identifier[msg2usernames] ( identifier[message] )
identifier[packages] = identifier[fedmsg] . identifier[meta] . identifier[msg2packages] ( identifier[message] )
keyword[if] keyword[None] keyword[in] identifier[usernames] :
identifier[log] . identifier[error] ( literal[string] % identifier[msg_id] )
identifier[usernames] =[ identifier[name] keyword[for] identifier[name] keyword[in] identifier[usernames] keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] ]
keyword[if] keyword[None] keyword[in] identifier[packages] :
identifier[log] . identifier[error] ( literal[string] % identifier[msg_id] )
identifier[packages] =[ identifier[pkg] keyword[for] identifier[pkg] keyword[in] identifier[packages] keyword[if] identifier[pkg] keyword[is] keyword[not] keyword[None] ]
keyword[for] identifier[username] keyword[in] identifier[usernames] :
keyword[if] identifier[username] keyword[not] keyword[in] identifier[_users_seen] :
identifier[User] . identifier[get_or_create] ( identifier[username] )
identifier[_users_seen] . identifier[add] ( identifier[username] )
keyword[for] identifier[package] keyword[in] identifier[packages] :
keyword[if] identifier[package] keyword[not] keyword[in] identifier[_packages_seen] :
identifier[Package] . identifier[get_or_create] ( identifier[package] )
identifier[_packages_seen] . identifier[add] ( identifier[package] )
identifier[session] . identifier[flush] ()
identifier[values] =[{ literal[string] : identifier[username] , literal[string] : identifier[obj] . identifier[id] } keyword[for] identifier[username] keyword[in] identifier[usernames] ]
keyword[if] identifier[values] :
identifier[session] . identifier[execute] ( identifier[user_assoc_table] . identifier[insert] (), identifier[values] )
identifier[values] =[{ literal[string] : identifier[package] , literal[string] : identifier[obj] . identifier[id] } keyword[for] identifier[package] keyword[in] identifier[packages] ]
keyword[if] identifier[values] :
identifier[session] . identifier[execute] ( identifier[pack_assoc_table] . identifier[insert] (), identifier[values] )
identifier[session] . identifier[flush] ()
identifier[session] . identifier[commit] () | def add(envelope):
""" Take a dict-like fedmsg envelope and store the headers and message
in the table.
"""
message = envelope['body']
timestamp = message.get('timestamp', None)
try:
if timestamp:
timestamp = datetime.datetime.utcfromtimestamp(timestamp) # depends on [control=['if'], data=[]]
else:
timestamp = datetime.datetime.utcnow() # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
headers = envelope.get('headers', None)
msg_id = message.get('msg_id', None)
if not msg_id and headers:
msg_id = headers.get('message-id', None) # depends on [control=['if'], data=[]]
if not msg_id:
msg_id = six.text_type(timestamp.year) + six.u('-') + six.text_type(uuid.uuid4()) # depends on [control=['if'], data=[]]
obj = Message(i=message.get('i', 0), msg_id=msg_id, topic=message['topic'], timestamp=timestamp, username=message.get('username', None), crypto=message.get('crypto', None), certificate=message.get('certificate', None), signature=message.get('signature', None))
obj.msg = message['msg']
obj.headers = headers
try:
session.add(obj)
session.flush() # depends on [control=['try'], data=[]]
except IntegrityError:
log.warning('Skipping message from %s with duplicate id: %s', message['topic'], msg_id)
session.rollback()
return # depends on [control=['except'], data=[]]
usernames = fedmsg.meta.msg2usernames(message)
packages = fedmsg.meta.msg2packages(message)
# Do a little sanity checking on fedmsg.meta results
if None in usernames:
# Notify developers so they can fix msg2usernames
log.error('NoneType found in usernames of %r' % msg_id)
# And prune out the bad value
usernames = [name for name in usernames if name is not None] # depends on [control=['if'], data=['usernames']]
if None in packages:
# Notify developers so they can fix msg2packages
log.error('NoneType found in packages of %r' % msg_id)
# And prune out the bad value
packages = [pkg for pkg in packages if pkg is not None] # depends on [control=['if'], data=['packages']]
# If we've never seen one of these users before, then:
# 1) make sure they exist in the db (create them if necessary)
# 2) mark an in memory cache so we can remember that they exist without
# having to hit the db.
for username in usernames:
if username not in _users_seen:
# Create the user in the DB if necessary
User.get_or_create(username)
# Then just mark an in memory cache noting that we've seen them.
_users_seen.add(username) # depends on [control=['if'], data=['username', '_users_seen']] # depends on [control=['for'], data=['username']]
for package in packages:
if package not in _packages_seen:
Package.get_or_create(package)
_packages_seen.add(package) # depends on [control=['if'], data=['package', '_packages_seen']] # depends on [control=['for'], data=['package']]
session.flush()
# These two blocks would normally be a simple "obj.users.append(user)" kind
# of statement, but here we drop down out of sqlalchemy's ORM and into the
# sql abstraction in order to gain a little performance boost.
values = [{'username': username, 'msg': obj.id} for username in usernames]
if values:
session.execute(user_assoc_table.insert(), values) # depends on [control=['if'], data=[]]
values = [{'package': package, 'msg': obj.id} for package in packages]
if values:
session.execute(pack_assoc_table.insert(), values) # depends on [control=['if'], data=[]]
# TODO -- can we avoid committing every time?
session.flush()
session.commit() |
def to_ipa(s, delimiter=' ', all_readings=False, container='[]'):
    """Convert the Chinese characters in *s* to IPA.

    *s* is a string containing Chinese characters.
    *delimiter* marks word boundaries in *s*, which lets multi-character
    words be read as words rather than character by character.
    *all_readings* controls whether every possible reading is returned for
    ambiguous words/characters; when ``True``, each group of readings is
    wrapped in the two characters of *container* (default ``'[]'``, giving
    ``'[READING1/READING2]'``).
    Characters not recognized as Chinese are left untouched.
    """
    # Go through numbered Pinyin as an intermediate representation, then
    # transliterate that into IPA.
    return pinyin_to_ipa(to_pinyin(s, delimiter, all_readings, container, False))
constant[Convert a string's Chinese characters to IPA.
*s* is a string containing Chinese characters.
*delimiter* is the character used to indicate word boundaries in *s*.
This is used to differentiate between words and characters so that a more
accurate reading can be returned.
*all_readings* is a boolean value indicating whether or not to return all
possible readings in the case of words/characters that have multiple
readings. *container* is a two character string that is used to
enclose words/characters if *all_readings* is ``True``. The default
``'[]'`` is used like this: ``'[READING1/READING2]'``.
Characters not recognized as Chinese are left untouched.
]
variable[numbered_pinyin] assign[=] call[name[to_pinyin], parameter[name[s], name[delimiter], name[all_readings], name[container], constant[False]]]
variable[ipa] assign[=] call[name[pinyin_to_ipa], parameter[name[numbered_pinyin]]]
return[name[ipa]] | keyword[def] identifier[to_ipa] ( identifier[s] , identifier[delimiter] = literal[string] , identifier[all_readings] = keyword[False] , identifier[container] = literal[string] ):
literal[string]
identifier[numbered_pinyin] = identifier[to_pinyin] ( identifier[s] , identifier[delimiter] , identifier[all_readings] , identifier[container] , keyword[False] )
identifier[ipa] = identifier[pinyin_to_ipa] ( identifier[numbered_pinyin] )
keyword[return] identifier[ipa] | def to_ipa(s, delimiter=' ', all_readings=False, container='[]'):
"""Convert a string's Chinese characters to IPA.
*s* is a string containing Chinese characters.
*delimiter* is the character used to indicate word boundaries in *s*.
This is used to differentiate between words and characters so that a more
accurate reading can be returned.
*all_readings* is a boolean value indicating whether or not to return all
possible readings in the case of words/characters that have multiple
readings. *container* is a two character string that is used to
enclose words/characters if *all_readings* is ``True``. The default
``'[]'`` is used like this: ``'[READING1/READING2]'``.
Characters not recognized as Chinese are left untouched.
"""
numbered_pinyin = to_pinyin(s, delimiter, all_readings, container, False)
ipa = pinyin_to_ipa(numbered_pinyin)
return ipa |
def parse_request(self, request, parameters, fake_method=None):
    '''
    Parse Django request
    '''
    # Allow the caller to override the HTTP verb (e.g. to tunnel PUT/DELETE
    # through POST); fall back to the request's real method.
    method = fake_method or request.method
    if request.method == 'POST':
        payload = dict(request.POST.iteritems())
    else:
        payload = parameters
    return method, request.build_absolute_uri(), request.META, payload
constant[
Parse Django request
]
return[tuple[[<ast.BoolOp object at 0x7da1b1b342b0>, <ast.Call object at 0x7da1b1b351e0>, <ast.Attribute object at 0x7da1b1b34160>, <ast.IfExp object at 0x7da1b1b34e50>]]] | keyword[def] identifier[parse_request] ( identifier[self] , identifier[request] , identifier[parameters] , identifier[fake_method] = keyword[None] ):
literal[string]
keyword[return] ( identifier[fake_method] keyword[or] identifier[request] . identifier[method] ,
identifier[request] . identifier[build_absolute_uri] (),
identifier[request] . identifier[META] ,
( identifier[dict] ( identifier[request] . identifier[POST] . identifier[iteritems] ())
keyword[if] identifier[request] . identifier[method] == literal[string]
keyword[else] identifier[parameters] )) | def parse_request(self, request, parameters, fake_method=None):
"""
Parse Django request
"""
return (fake_method or request.method, request.build_absolute_uri(), request.META, dict(request.POST.iteritems()) if request.method == 'POST' else parameters) |
def _init_client(self, from_archive=False):
    """Instantiate the Confluence API client for this backend.

    :param from_archive: when True, the client replays data from the
        archive instead of hitting the live server
    """
    client = ConfluenceClient(self.url, archive=self.archive,
                              from_archive=from_archive)
    return client
constant[Init client]
return[call[name[ConfluenceClient], parameter[name[self].url]]] | keyword[def] identifier[_init_client] ( identifier[self] , identifier[from_archive] = keyword[False] ):
literal[string]
keyword[return] identifier[ConfluenceClient] ( identifier[self] . identifier[url] , identifier[archive] = identifier[self] . identifier[archive] , identifier[from_archive] = identifier[from_archive] ) | def _init_client(self, from_archive=False):
"""Init client"""
return ConfluenceClient(self.url, archive=self.archive, from_archive=from_archive) |
def on_command_control(self, val_id):
    """Send a command-control request for the given control id.

    Parameters
    ----------
    val_id : str
        Identifier of the control value to send.

    Returns
    -------
    requests.Response
    """
    # The service expects "control,controlId=1|<val_id>" as the raw body.
    payload = '|'.join(('control,controlId=1', val_id))
    return self._basic_post(url='commandControlPublic', data=payload)
constant[
Parameters
----------
val_id : str
Returns
-------
requests.Response
]
variable[data] assign[=] binary_operation[constant[control,controlId=1|] + name[val_id]]
return[call[name[self]._basic_post, parameter[]]] | keyword[def] identifier[on_command_control] ( identifier[self] , identifier[val_id] ):
literal[string]
identifier[data] = literal[string] + identifier[val_id]
keyword[return] identifier[self] . identifier[_basic_post] ( identifier[url] = literal[string] , identifier[data] = identifier[data] ) | def on_command_control(self, val_id):
"""
Parameters
----------
val_id : str
Returns
-------
requests.Response
"""
data = 'control,controlId=1|' + val_id
return self._basic_post(url='commandControlPublic', data=data) |
def get_date(datetime, time_format=None):
    """
    Return a datetime object from a string, with optional time format.

    Parameters
    ----------
    datetime : str
        Date-time as string in any sensible format.
    time_format : datetime str (optional)
        String describing the datetime format. If missing uses
        dateutil.parser to guess time format.
    """
    if time_format is None:
        # No explicit format: let dateutil guess it.
        t = du.parser.parse(datetime)
    else:
        # BUG FIX: strftime *formats* a datetime into a string; parsing a
        # string against an explicit format requires strptime.
        t = dt.datetime.strptime(datetime, time_format)
    return t
constant[
Return a datetime oject from a string, with optional time format.
Parameters
----------
datetime : str
Date-time as string in any sensible format.
time_format : datetime str (optional)
String describing the datetime format. If missing uses
dateutil.parser to guess time format.
]
if compare[name[time_format] is constant[None]] begin[:]
variable[t] assign[=] call[name[du].parser.parse, parameter[name[datetime]]]
return[name[t]] | keyword[def] identifier[get_date] ( identifier[datetime] , identifier[time_format] = keyword[None] ):
literal[string]
keyword[if] identifier[time_format] keyword[is] keyword[None] :
identifier[t] = identifier[du] . identifier[parser] . identifier[parse] ( identifier[datetime] )
keyword[else] :
identifier[t] = identifier[dt] . identifier[datetime] . identifier[strftime] ( identifier[datetime] , identifier[time_format] )
keyword[return] identifier[t] | def get_date(datetime, time_format=None):
"""
Return a datetime oject from a string, with optional time format.
Parameters
----------
datetime : str
Date-time as string in any sensible format.
time_format : datetime str (optional)
String describing the datetime format. If missing uses
dateutil.parser to guess time format.
"""
if time_format is None:
t = du.parser.parse(datetime) # depends on [control=['if'], data=[]]
else:
t = dt.datetime.strftime(datetime, time_format)
return t |
def get_capabilities(cls):
    """List the model's capabilities."""
    # A capability is any Capability subclass in the MRO, excluding the
    # Capability base itself and anything that is a Model.
    return [
        base for base in cls.mro()
        if issubclass(base, Capability)
        and base is not Capability
        and not issubclass(base, Model)
    ]
constant[List the model's capabilities.]
variable[capabilities] assign[=] list[[]]
for taget[name[_cls]] in starred[call[name[cls].mro, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b0e646a0> begin[:]
call[name[capabilities].append, parameter[name[_cls]]]
return[name[capabilities]] | keyword[def] identifier[get_capabilities] ( identifier[cls] ):
literal[string]
identifier[capabilities] =[]
keyword[for] identifier[_cls] keyword[in] identifier[cls] . identifier[mro] ():
keyword[if] identifier[issubclass] ( identifier[_cls] , identifier[Capability] ) keyword[and] identifier[_cls] keyword[is] keyword[not] identifier[Capability] keyword[and] keyword[not] identifier[issubclass] ( identifier[_cls] , identifier[Model] ):
identifier[capabilities] . identifier[append] ( identifier[_cls] )
keyword[return] identifier[capabilities] | def get_capabilities(cls):
"""List the model's capabilities."""
capabilities = []
for _cls in cls.mro():
if issubclass(_cls, Capability) and _cls is not Capability and (not issubclass(_cls, Model)):
capabilities.append(_cls) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['_cls']]
return capabilities |
def cmdify(self, extra_args=None):
    """Encode into a cmd-executable string.

    Re-implements CreateProcess's quoting logic to turn the argument list
    into a single string for the Windows shell:

    * Double quotes are escaped with a backslash.
    * Backslashes immediately before a quote are doubled so the escapes
      stay correct; other backslashes are left for cmd to read literally.
    * The result is wrapped in a pair of double quotes.

    A token with no whitespace is deliberately left unquoted so Windows
    built-ins (``echo`` and friends) and DOS-style ``/switches`` keep
    working. Intended for pre-processing an argument list before
    ``subprocess.Popen(..., shell=True)``.

    See also: https://docs.python.org/3/library/subprocess.html
    """
    args = list(self._parts)
    if extra_args:
        args.extend(extra_args)

    def _quote(token):
        # Whitespace-free tokens pass through untouched.
        if re.search(r'\s', token) is None:
            return token
        # Double backslash runs before each quote, escape the quote,
        # then wrap the whole token in double quotes.
        return '"{0}"'.format(re.sub(r'(\\*)"', r'\1\1\\"', token))

    return " ".join(_quote(token) for token in args)
constant[Encode into a cmd-executable string.
This re-implements CreateProcess's quoting logic to turn a list of
arguments into one single string for the shell to interpret.
* All double quotes are escaped with a backslash.
* Existing backslashes before a quote are doubled, so they are all
escaped properly.
* Backslashes elsewhere are left as-is; cmd will interpret them
literally.
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
whitespaces. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
See also: https://docs.python.org/3/library/subprocess.html
]
variable[parts] assign[=] call[name[list], parameter[name[self]._parts]]
if name[extra_args] begin[:]
call[name[parts].extend, parameter[name[extra_args]]]
return[call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da18f09cb50>]]] | keyword[def] identifier[cmdify] ( identifier[self] , identifier[extra_args] = keyword[None] ):
literal[string]
identifier[parts] = identifier[list] ( identifier[self] . identifier[_parts] )
keyword[if] identifier[extra_args] :
identifier[parts] . identifier[extend] ( identifier[extra_args] )
keyword[return] literal[string] . identifier[join] (
identifier[arg] keyword[if] keyword[not] identifier[next] ( identifier[re] . identifier[finditer] ( literal[string] , identifier[arg] ), keyword[None] )
keyword[else] literal[string] . identifier[format] ( identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[arg] ))
keyword[for] identifier[arg] keyword[in] identifier[parts]
) | def cmdify(self, extra_args=None):
"""Encode into a cmd-executable string.
This re-implements CreateProcess's quoting logic to turn a list of
arguments into one single string for the shell to interpret.
* All double quotes are escaped with a backslash.
* Existing backslashes before a quote are doubled, so they are all
escaped properly.
* Backslashes elsewhere are left as-is; cmd will interpret them
literally.
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
whitespaces. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
See also: https://docs.python.org/3/library/subprocess.html
"""
parts = list(self._parts)
if extra_args:
parts.extend(extra_args) # depends on [control=['if'], data=[]]
return ' '.join((arg if not next(re.finditer('\\s', arg), None) else '"{0}"'.format(re.sub('(\\\\*)"', '\\1\\1\\\\"', arg)) for arg in parts)) |
def add_kv_store(self, key, value):
    """Add a key-value store entry.

    :param key: string
    :param value: string
    """
    payload = to_json({
        'operation': 'STORE',
        'key': key,
        'value': value,
    })
    response = self.post(self.make_url("/useragent-kv"), data=payload,
                         headers=self.default_headers)
    return response.text
constant[Add a key-value store entry.
:param key: string
:param value: string
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da18fe93fa0>, <ast.Constant object at 0x7da18fe91cc0>, <ast.Constant object at 0x7da18fe916c0>], [<ast.Constant object at 0x7da18fe93f10>, <ast.Name object at 0x7da18fe934c0>, <ast.Name object at 0x7da18fe929e0>]]
return[call[name[self].post, parameter[call[name[self].make_url, parameter[constant[/useragent-kv]]]]].text] | keyword[def] identifier[add_kv_store] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
identifier[data] ={
literal[string] : literal[string] ,
literal[string] : identifier[key] ,
literal[string] : identifier[value]
}
keyword[return] identifier[self] . identifier[post] ( identifier[self] . identifier[make_url] ( literal[string] ), identifier[data] = identifier[to_json] ( identifier[data] ),
identifier[headers] = identifier[self] . identifier[default_headers] ). identifier[text] | def add_kv_store(self, key, value):
"""Add a key-value store entry.
:param key: string
:param value: string
"""
data = {'operation': 'STORE', 'key': key, 'value': value}
return self.post(self.make_url('/useragent-kv'), data=to_json(data), headers=self.default_headers).text |
def update(self, name, html_url, zip_url):
    """Updates this email template.

    :param name: new template name
    :param html_url: URL of the template's HTML page
    :param zip_url: URL of the template's zip bundle
    :returns: the HTTP response from the PUT call
    """
    body = {
        "Name": name,
        "HtmlPageURL": html_url,
        "ZipFileURL": zip_url}
    # FIX: the response was previously assigned to an unused local and
    # discarded; return it so callers can inspect the result.
    return self._put("/templates/%s.json" %
                     self.template_id, json.dumps(body))
constant[Updates this email template.]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da20c7ca980>, <ast.Constant object at 0x7da20c7ca8f0>, <ast.Constant object at 0x7da20c7c9fc0>], [<ast.Name object at 0x7da20c7c8430>, <ast.Name object at 0x7da20c7cb4c0>, <ast.Name object at 0x7da20c7c8c40>]]
variable[response] assign[=] call[name[self]._put, parameter[binary_operation[constant[/templates/%s.json] <ast.Mod object at 0x7da2590d6920> name[self].template_id], call[name[json].dumps, parameter[name[body]]]]] | keyword[def] identifier[update] ( identifier[self] , identifier[name] , identifier[html_url] , identifier[zip_url] ):
literal[string]
identifier[body] ={
literal[string] : identifier[name] ,
literal[string] : identifier[html_url] ,
literal[string] : identifier[zip_url] }
identifier[response] = identifier[self] . identifier[_put] ( literal[string] %
identifier[self] . identifier[template_id] , identifier[json] . identifier[dumps] ( identifier[body] )) | def update(self, name, html_url, zip_url):
"""Updates this email template."""
body = {'Name': name, 'HtmlPageURL': html_url, 'ZipFileURL': zip_url}
response = self._put('/templates/%s.json' % self.template_id, json.dumps(body)) |
def _get(self, api_call, params, method='GET', file_=None):
"""Function to preapre API call.
Parameters:
api_call (str): API function to be called.
params (dict): API function parameters.
method (str): (Defauld: GET) HTTP method 'GET' or 'POST'
file_ (file): File to upload.
"""
url = self._build_url(api_call)
if method == 'GET':
request_args = {'params': params}
else:
if self.password_hash is None:
self._build_hash_string()
# Set login
params['login'] = self.username
params['password_hash'] = self.password_hash
request_args = {'data': params, 'files': file_}
# Do call
return self._request(url, api_call, request_args, method) | def function[_get, parameter[self, api_call, params, method, file_]]:
constant[Function to preapre API call.
Parameters:
api_call (str): API function to be called.
params (dict): API function parameters.
method (str): (Defauld: GET) HTTP method 'GET' or 'POST'
file_ (file): File to upload.
]
variable[url] assign[=] call[name[self]._build_url, parameter[name[api_call]]]
if compare[name[method] equal[==] constant[GET]] begin[:]
variable[request_args] assign[=] dictionary[[<ast.Constant object at 0x7da1b0d1a050>], [<ast.Name object at 0x7da1b0d19f30>]]
return[call[name[self]._request, parameter[name[url], name[api_call], name[request_args], name[method]]]] | keyword[def] identifier[_get] ( identifier[self] , identifier[api_call] , identifier[params] , identifier[method] = literal[string] , identifier[file_] = keyword[None] ):
literal[string]
identifier[url] = identifier[self] . identifier[_build_url] ( identifier[api_call] )
keyword[if] identifier[method] == literal[string] :
identifier[request_args] ={ literal[string] : identifier[params] }
keyword[else] :
keyword[if] identifier[self] . identifier[password_hash] keyword[is] keyword[None] :
identifier[self] . identifier[_build_hash_string] ()
identifier[params] [ literal[string] ]= identifier[self] . identifier[username]
identifier[params] [ literal[string] ]= identifier[self] . identifier[password_hash]
identifier[request_args] ={ literal[string] : identifier[params] , literal[string] : identifier[file_] }
keyword[return] identifier[self] . identifier[_request] ( identifier[url] , identifier[api_call] , identifier[request_args] , identifier[method] ) | def _get(self, api_call, params, method='GET', file_=None):
"""Function to preapre API call.
Parameters:
api_call (str): API function to be called.
params (dict): API function parameters.
method (str): (Defauld: GET) HTTP method 'GET' or 'POST'
file_ (file): File to upload.
"""
url = self._build_url(api_call)
if method == 'GET':
request_args = {'params': params} # depends on [control=['if'], data=[]]
else:
if self.password_hash is None:
self._build_hash_string() # depends on [control=['if'], data=[]]
# Set login
params['login'] = self.username
params['password_hash'] = self.password_hash
request_args = {'data': params, 'files': file_}
# Do call
return self._request(url, api_call, request_args, method) |
def get_actor(self, actor, aid, check_monitor=True):
    '''Given an actor unique id return the actor proxy.

    Lookup order: the parent class's registry first, then this arbiter's
    monitors, managed actors and registered actors, and finally the
    workers of each monitor.

    NOTE(review): *check_monitor* is accepted but never consulted here;
    it looks like a recursion guard honoured by the monitors'
    ``get_actor`` (which is called with ``check_monitor=False``) —
    confirm against that implementation.
    '''
    a = super().get_actor(actor, aid)
    if a is None:
        if aid in self.monitors:  # Check in monitors aid
            return self.monitors[aid]
        elif aid in self.managed_actors:
            return self.managed_actors[aid]
        elif aid in self.registered:
            return self.registered[aid]
        else:  # Finally check in workers in monitors
            for m in self.monitors.values():
                # Pass check_monitor=False so the monitor does not bounce
                # the lookup back up.
                a = m.get_actor(aid, check_monitor=False)
                if a is not None:
                    return a
    else:
        return a
constant[Given an actor unique id return the actor proxy.]
variable[a] assign[=] call[call[name[super], parameter[]].get_actor, parameter[name[actor], name[aid]]]
if compare[name[a] is constant[None]] begin[:]
if compare[name[aid] in name[self].monitors] begin[:]
return[call[name[self].monitors][name[aid]]] | keyword[def] identifier[get_actor] ( identifier[self] , identifier[actor] , identifier[aid] , identifier[check_monitor] = keyword[True] ):
literal[string]
identifier[a] = identifier[super] (). identifier[get_actor] ( identifier[actor] , identifier[aid] )
keyword[if] identifier[a] keyword[is] keyword[None] :
keyword[if] identifier[aid] keyword[in] identifier[self] . identifier[monitors] :
keyword[return] identifier[self] . identifier[monitors] [ identifier[aid] ]
keyword[elif] identifier[aid] keyword[in] identifier[self] . identifier[managed_actors] :
keyword[return] identifier[self] . identifier[managed_actors] [ identifier[aid] ]
keyword[elif] identifier[aid] keyword[in] identifier[self] . identifier[registered] :
keyword[return] identifier[self] . identifier[registered] [ identifier[aid] ]
keyword[else] :
keyword[for] identifier[m] keyword[in] identifier[self] . identifier[monitors] . identifier[values] ():
identifier[a] = identifier[m] . identifier[get_actor] ( identifier[aid] , identifier[check_monitor] = keyword[False] )
keyword[if] identifier[a] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[a]
keyword[else] :
keyword[return] identifier[a] | def get_actor(self, actor, aid, check_monitor=True):
"""Given an actor unique id return the actor proxy."""
a = super().get_actor(actor, aid)
if a is None:
if aid in self.monitors: # Check in monitors aid
return self.monitors[aid] # depends on [control=['if'], data=['aid']]
elif aid in self.managed_actors:
return self.managed_actors[aid] # depends on [control=['if'], data=['aid']]
elif aid in self.registered:
return self.registered[aid] # depends on [control=['if'], data=['aid']]
else: # Finally check in workers in monitors
for m in self.monitors.values():
a = m.get_actor(aid, check_monitor=False)
if a is not None:
return a # depends on [control=['if'], data=['a']] # depends on [control=['for'], data=['m']] # depends on [control=['if'], data=['a']]
else:
return a |
def set(self, key, value):
    """Sets the value for the key in the override register.
    Will be used instead of values obtained via
    args, config file, env, defaults or key/value store.
    """
    # Keys are case-insensitive: normalise to lower case before mapping
    # to the register's real key.
    real_key = self._real_key(key.lower())
    self._override[real_key] = value
constant[Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
]
variable[k] assign[=] call[name[self]._real_key, parameter[call[name[key].lower, parameter[]]]]
call[name[self]._override][name[k]] assign[=] name[value] | keyword[def] identifier[set] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
identifier[k] = identifier[self] . identifier[_real_key] ( identifier[key] . identifier[lower] ())
identifier[self] . identifier[_override] [ identifier[k] ]= identifier[value] | def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value |
def mavlink_packet(self, msg):
    '''handle an incoming mavlink packet

    Updates the console status fields (row 4) driven by RC, servo and
    RPM telemetry. Colour thresholds are hard-coded below.
    '''
    # FIX: dropped the unused local `master` and renamed the local that
    # shadowed the builtin `type`.
    mtype = msg.get_type()
    # Ignition status from RC channel 6: PWM above 1500 means ignition on.
    if mtype == 'RC_CHANNELS_RAW':
        ign_colour = 'green' if msg.chan6_raw > 1500 else 'red'
        self.console.set_status('IGN', 'IGN', fg=ign_colour, row=4)
    # Throttle servo (output 8): red below 1200, orange below 1300.
    if mtype == 'SERVO_OUTPUT_RAW':
        rc8 = msg.servo8_raw
        if rc8 < 1200:
            thr_colour = 'red'
        elif rc8 < 1300:
            thr_colour = 'orange'
        else:
            thr_colour = 'green'
        self.console.set_status('THR', 'THR', fg=thr_colour, row=4)
    # Engine RPM: red below 3000, orange below 10000, else healthy.
    if mtype == 'RPM':
        rpm = msg.rpm1
        if rpm < 3000:
            rpm_colour = 'red'
        elif rpm < 10000:
            rpm_colour = 'orange'
        else:
            rpm_colour = 'green'
        self.console.set_status('RPM', 'RPM: %u' % rpm, fg=rpm_colour, row=4)
constant[handle an incoming mavlink packet]
variable[type] assign[=] call[name[msg].get_type, parameter[]]
variable[master] assign[=] name[self].master
if compare[name[type] in list[[<ast.Constant object at 0x7da204345ba0>]]] begin[:]
variable[rc6] assign[=] name[msg].chan6_raw
if compare[name[rc6] greater[>] constant[1500]] begin[:]
variable[ign_colour] assign[=] constant[green]
call[name[self].console.set_status, parameter[constant[IGN], constant[IGN]]]
if compare[name[type] in list[[<ast.Constant object at 0x7da18f58c610>]]] begin[:]
variable[rc8] assign[=] name[msg].servo8_raw
if compare[name[rc8] less[<] constant[1200]] begin[:]
variable[thr_colour] assign[=] constant[red]
call[name[self].console.set_status, parameter[constant[THR], constant[THR]]]
if compare[name[type] in list[[<ast.Constant object at 0x7da18f58e1d0>]]] begin[:]
variable[rpm] assign[=] name[msg].rpm1
if compare[name[rpm] less[<] constant[3000]] begin[:]
variable[rpm_colour] assign[=] constant[red]
call[name[self].console.set_status, parameter[constant[RPM], binary_operation[constant[RPM: %u] <ast.Mod object at 0x7da2590d6920> name[rpm]]]] | keyword[def] identifier[mavlink_packet] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[type] = identifier[msg] . identifier[get_type] ()
identifier[master] = identifier[self] . identifier[master]
keyword[if] identifier[type] keyword[in] [ literal[string] ]:
identifier[rc6] = identifier[msg] . identifier[chan6_raw]
keyword[if] identifier[rc6] > literal[int] :
identifier[ign_colour] = literal[string]
keyword[else] :
identifier[ign_colour] = literal[string]
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = identifier[ign_colour] , identifier[row] = literal[int] )
keyword[if] identifier[type] keyword[in] [ literal[string] ]:
identifier[rc8] = identifier[msg] . identifier[servo8_raw]
keyword[if] identifier[rc8] < literal[int] :
identifier[thr_colour] = literal[string]
keyword[elif] identifier[rc8] < literal[int] :
identifier[thr_colour] = literal[string]
keyword[else] :
identifier[thr_colour] = literal[string]
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] , identifier[fg] = identifier[thr_colour] , identifier[row] = literal[int] )
keyword[if] identifier[type] keyword[in] [ literal[string] ]:
identifier[rpm] = identifier[msg] . identifier[rpm1]
keyword[if] identifier[rpm] < literal[int] :
identifier[rpm_colour] = literal[string]
keyword[elif] identifier[rpm] < literal[int] :
identifier[rpm_colour] = literal[string]
keyword[else] :
identifier[rpm_colour] = literal[string]
identifier[self] . identifier[console] . identifier[set_status] ( literal[string] , literal[string] % identifier[rpm] , identifier[fg] = identifier[rpm_colour] , identifier[row] = literal[int] ) | def mavlink_packet(self, msg):
"""handle an incoming mavlink packet"""
type = msg.get_type()
master = self.master
# add some status fields
if type in ['RC_CHANNELS_RAW']:
rc6 = msg.chan6_raw
if rc6 > 1500:
ign_colour = 'green' # depends on [control=['if'], data=[]]
else:
ign_colour = 'red'
self.console.set_status('IGN', 'IGN', fg=ign_colour, row=4) # depends on [control=['if'], data=[]]
if type in ['SERVO_OUTPUT_RAW']:
rc8 = msg.servo8_raw
if rc8 < 1200:
thr_colour = 'red' # depends on [control=['if'], data=[]]
elif rc8 < 1300:
thr_colour = 'orange' # depends on [control=['if'], data=[]]
else:
thr_colour = 'green'
self.console.set_status('THR', 'THR', fg=thr_colour, row=4) # depends on [control=['if'], data=[]]
if type in ['RPM']:
rpm = msg.rpm1
if rpm < 3000:
rpm_colour = 'red' # depends on [control=['if'], data=[]]
elif rpm < 10000:
rpm_colour = 'orange' # depends on [control=['if'], data=[]]
else:
rpm_colour = 'green'
self.console.set_status('RPM', 'RPM: %u' % rpm, fg=rpm_colour, row=4) # depends on [control=['if'], data=[]] |
def renew_step_dir(step_dir: str):
    """Recreate *step_dir* from scratch, logging each action.

    An existing directory (e.g. left over from an unfinished step) is
    removed before a fresh empty one is created.
    """
    leftover = os.path.exists(step_dir)
    if leftover:
        logging.info("Remove unfinished step %s", step_dir)
        shutil.rmtree(step_dir)
    logging.info("Create: %s", step_dir)
    os.makedirs(step_dir)
constant[Delete step directory if exists and create, reporting actions.]
if call[name[os].path.exists, parameter[name[step_dir]]] begin[:]
call[name[logging].info, parameter[constant[Remove unfinished step %s], name[step_dir]]]
call[name[shutil].rmtree, parameter[name[step_dir]]]
call[name[logging].info, parameter[constant[Create: %s], name[step_dir]]]
call[name[os].makedirs, parameter[name[step_dir]]] | keyword[def] identifier[renew_step_dir] ( identifier[step_dir] : identifier[str] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[step_dir] ):
identifier[logging] . identifier[info] ( literal[string] , identifier[step_dir] )
identifier[shutil] . identifier[rmtree] ( identifier[step_dir] )
identifier[logging] . identifier[info] ( literal[string] , identifier[step_dir] )
identifier[os] . identifier[makedirs] ( identifier[step_dir] ) | def renew_step_dir(step_dir: str):
"""Delete step directory if exists and create, reporting actions."""
if os.path.exists(step_dir):
logging.info('Remove unfinished step %s', step_dir)
shutil.rmtree(step_dir) # depends on [control=['if'], data=[]]
logging.info('Create: %s', step_dir)
os.makedirs(step_dir) |
def optimal_parameters(reconstruction, fom, phantoms, data,
                       initial=None, univariate=False):
    r"""Find the optimal parameters for a reconstruction method.

    Notes
    -----
    For a forward operator :math:`A : X \to Y`, a reconstruction operator
    parametrized by :math:`\theta` is some operator
    :math:`R_\theta : Y \to X` such that

    .. math::
        R_\theta(A(x)) \approx x.

    The optimal choice of :math:`\theta` is

    .. math::
        \theta = \arg\min_\theta fom(R(A(x) + noise), x)

    where :math:`fom : X \times X \to \mathbb{R}` is a figure of merit.

    Parameters
    ----------
    reconstruction : callable
        ``reconstruction(data, parameters)`` returning the reconstructed
        image for the given parameter values.
    fom : callable
        ``fom(reconstructed_image, true_image)`` returning a scalar
        figure of merit.
    phantoms : sequence
        True images.
    data : sequence
        The data to reconstruct from, aligned with ``phantoms``.
    initial : array-like or pair
        Initial guess for the parameters: a required array in the
        multivariate case, an optional bracketing pair in the
        univariate case.
    univariate : bool, optional
        Whether to use a univariate (scalar-parameter) solver.

    Returns
    -------
    parameters : 'numpy.ndarray'
        The optimal parameters for the reconstruction problem.
    """
    def objective(lam):
        # Aggregate figure of merit over all phantom/data pairs; this is
        # the quantity scipy minimizes over the parameter(s) `lam`.
        total = 0
        for true_image, datum in zip(phantoms, data):
            total += fom(reconstruction(datum, lam), true_image)
        return total

    # Tie the solver tolerance to the floating-point resolution of the
    # image space so we do not over-iterate past representable precision.
    tol = np.finfo(phantoms[0].space.dtype).resolution * 10

    if univariate:
        # Scalar parameter: the dedicated 1-D solver is faster here.
        result = scipy.optimize.minimize_scalar(
            objective, bracket=initial, tol=tol, bounds=None,
            options={'disp': False})
        return result.x

    # Several parameters: gradient-free Powell search from the initial guess.
    initial = np.asarray(initial)
    return scipy.optimize.fmin_powell(
        objective, initial, xtol=tol, ftol=tol, disp=False)
constant[Find the optimal parameters for a reconstruction method.
Notes
-----
For a forward operator :math:`A : X \to Y`, a reconstruction operator
parametrized by :math:`\theta` is some operator
:math:`R_\theta : Y \to X`
such that
.. math::
R_\theta(A(x)) \approx x.
The optimal choice of :math:`\theta` is given by
.. math::
\theta = \arg\min_\theta fom(R(A(x) + noise), x)
where :math:`fom : X \times X \to \mathbb{R}` is a figure of merit.
Parameters
----------
reconstruction : callable
Function that takes two parameters:
* data : The data to be reconstructed
* parameters : Parameters of the reconstruction method
The function should return the reconstructed image.
fom : callable
Function that takes two parameters:
* reconstructed_image
* true_image
and returns a scalar figure of merit.
phantoms : sequence
True images.
data : sequence
The data to reconstruct from.
initial : array-like or pair
Initial guess for the parameters. It is
- a required array in the multivariate case
- an optional pair in the univariate case.
univariate : bool, optional
Whether to use a univariate solver
Returns
-------
parameters : 'numpy.ndarray'
The optimal parameters for the reconstruction problem.
]
def function[func, parameter[lam]]:
return[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b1d62c80>]]]
variable[tol] assign[=] binary_operation[call[name[np].finfo, parameter[call[name[phantoms]][constant[0]].space.dtype]].resolution * constant[10]]
if name[univariate] begin[:]
variable[result] assign[=] call[name[scipy].optimize.minimize_scalar, parameter[name[func]]]
return[name[result].x] | keyword[def] identifier[optimal_parameters] ( identifier[reconstruction] , identifier[fom] , identifier[phantoms] , identifier[data] ,
identifier[initial] = keyword[None] , identifier[univariate] = keyword[False] ):
literal[string]
keyword[def] identifier[func] ( identifier[lam] ):
keyword[return] identifier[sum] ( identifier[fom] ( identifier[reconstruction] ( identifier[datai] , identifier[lam] ), identifier[phantomi] )
keyword[for] identifier[phantomi] , identifier[datai] keyword[in] identifier[zip] ( identifier[phantoms] , identifier[data] ))
identifier[tol] = identifier[np] . identifier[finfo] ( identifier[phantoms] [ literal[int] ]. identifier[space] . identifier[dtype] ). identifier[resolution] * literal[int]
keyword[if] identifier[univariate] :
identifier[result] = identifier[scipy] . identifier[optimize] . identifier[minimize_scalar] (
identifier[func] , identifier[bracket] = identifier[initial] , identifier[tol] = identifier[tol] , identifier[bounds] = keyword[None] ,
identifier[options] ={ literal[string] : keyword[False] })
keyword[return] identifier[result] . identifier[x]
keyword[else] :
identifier[initial] = identifier[np] . identifier[asarray] ( identifier[initial] )
identifier[parameters] = identifier[scipy] . identifier[optimize] . identifier[fmin_powell] (
identifier[func] , identifier[initial] , identifier[xtol] = identifier[tol] , identifier[ftol] = identifier[tol] , identifier[disp] = keyword[False] )
keyword[return] identifier[parameters] | def optimal_parameters(reconstruction, fom, phantoms, data, initial=None, univariate=False):
"""Find the optimal parameters for a reconstruction method.
Notes
-----
For a forward operator :math:`A : X \\to Y`, a reconstruction operator
parametrized by :math:`\\theta` is some operator
:math:`R_\\theta : Y \\to X`
such that
.. math::
R_\\theta(A(x)) \\approx x.
The optimal choice of :math:`\\theta` is given by
.. math::
\\theta = \\arg\\min_\\theta fom(R(A(x) + noise), x)
where :math:`fom : X \\times X \\to \\mathbb{R}` is a figure of merit.
Parameters
----------
reconstruction : callable
Function that takes two parameters:
* data : The data to be reconstructed
* parameters : Parameters of the reconstruction method
The function should return the reconstructed image.
fom : callable
Function that takes two parameters:
* reconstructed_image
* true_image
and returns a scalar figure of merit.
phantoms : sequence
True images.
data : sequence
The data to reconstruct from.
initial : array-like or pair
Initial guess for the parameters. It is
- a required array in the multivariate case
- an optional pair in the univariate case.
univariate : bool, optional
Whether to use a univariate solver
Returns
-------
parameters : 'numpy.ndarray'
The optimal parameters for the reconstruction problem.
"""
def func(lam):
# Function to be minimized by scipy
return sum((fom(reconstruction(datai, lam), phantomi) for (phantomi, datai) in zip(phantoms, data)))
# Pick resolution to fit the one used by the space
tol = np.finfo(phantoms[0].space.dtype).resolution * 10
if univariate:
# We use a faster optimizer for the one parameter case
result = scipy.optimize.minimize_scalar(func, bracket=initial, tol=tol, bounds=None, options={'disp': False})
return result.x # depends on [control=['if'], data=[]]
else:
# Use a gradient free method to find the best parameters
initial = np.asarray(initial)
parameters = scipy.optimize.fmin_powell(func, initial, xtol=tol, ftol=tol, disp=False)
return parameters |
def add_put(self, *args, **kwargs):
    """Register a PUT route; shorthand for ``add_route(METH_PUT, ...)``."""
    put_method = hdrs.METH_PUT
    return self.add_route(put_method, *args, **kwargs)
constant[
Shortcut for add_route with method PUT
]
return[call[name[self].add_route, parameter[name[hdrs].METH_PUT, <ast.Starred object at 0x7da1b0e24ee0>]]] | keyword[def] identifier[add_put] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[add_route] ( identifier[hdrs] . identifier[METH_PUT] ,* identifier[args] ,** identifier[kwargs] ) | def add_put(self, *args, **kwargs):
"""
Shortcut for add_route with method PUT
"""
return self.add_route(hdrs.METH_PUT, *args, **kwargs) |
def _update_trace_info(self, fields, hm):
    """Parses a trace line and updates the :attr:`status_info` attribute.

    Parameters
    ----------
    fields : list
        List of the tab-seperated elements of the trace line
    hm : dict
        Maps the column IDs to their position in the fields argument.
        This dictionary object is retrieve from :func:`_header_mapping`.
    """
    # Ignore trace lines for processes we are not tracking.
    process = fields[hm["process"]]
    if process not in self.processes:
        return
    # Get information from a single line of trace file
    # (column name -> raw string value for this trace line).
    info = dict((column, fields[pos]) for column, pos in hm.items())
    # The headers that will be used to populate the process
    # NOTE(review): info["tag"] is read unconditionally in the loop below,
    # which assumes the trace header always has a 'tag' column -- the
    # "tag" in info guard only appears much later. TODO confirm.
    process_tag_headers = ["realtime", "rss", "rchar", "wchar"]
    for h in process_tag_headers:
        # In the rare occasion the tag is parsed first in the trace
        # file than the log file, add the new tag.
        if info["tag"] not in self.process_tags[process]:
            # If the 'start' tag is present in the trace, use that
            # information. If not, it will be parsed in the log file.
            # NOTE(review): split()[1] assumes the start field holds at
            # least two whitespace-separated tokens; an IndexError here
            # would not be caught (only KeyError is) -- verify format.
            try:
                timestart = info["start"].split()[1]
            except KeyError:
                timestart = None
            self.process_tags[process][info["tag"]] = {
                "workdir": self._expand_path(info["hash"]),
                "start": timestart
            }
        if h in info and info["tag"] != "-":
            # 'realtime' is stored verbatim; the size-like columns are
            # converted to numbers unless they hold the '-' placeholder.
            if h != "realtime" and info[h] != "-":
                self.process_tags[process][info["tag"]][h] = \
                    round(self._size_coverter(info[h]), 2)
            else:
                self.process_tags[process][info["tag"]][h] = info[h]
    # Set allocated cpu and memory information to process
    # (only the first value seen for each process is kept).
    if "cpus" in info and not self.processes[process]["cpus"]:
        self.processes[process]["cpus"] = info["cpus"]
    if "memory" in info and not self.processes[process]["memory"]:
        try:
            self.processes[process]["memory"] = self._size_coverter(
                info["memory"])
        except ValueError:
            # Memory column could not be parsed as a size; leave unset.
            self.processes[process]["memory"] = None
    # Skip tasks whose hash was already recorded to avoid duplicates.
    if info["hash"] in self.stored_ids:
        return
    # If the task hash code is provided, expand it to the work directory
    # and add a new entry
    if "hash" in info:
        hs = info["hash"]
        info["work_dir"] = self._expand_path(hs)
    if "tag" in info:
        tag = info["tag"]
        # Register previously unseen sample tags; both the full tag and
        # its first whitespace token are checked against known samples.
        if tag != "-" and tag not in self.samples and \
                tag.split()[0] not in self.samples:
            self.samples.append(tag)
    self.trace_info[process].append(info)
    self.stored_ids.append(info["hash"])
constant[Parses a trace line and updates the :attr:`status_info` attribute.
Parameters
----------
fields : list
List of the tab-seperated elements of the trace line
hm : dict
Maps the column IDs to their position in the fields argument.
This dictionary object is retrieve from :func:`_header_mapping`.
]
variable[process] assign[=] call[name[fields]][call[name[hm]][constant[process]]]
if compare[name[process] <ast.NotIn object at 0x7da2590d7190> name[self].processes] begin[:]
return[None]
variable[info] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b0389630>]]
variable[process_tag_headers] assign[=] list[[<ast.Constant object at 0x7da1b02c54b0>, <ast.Constant object at 0x7da1b02c4430>, <ast.Constant object at 0x7da1b02c5000>, <ast.Constant object at 0x7da1b02c44c0>]]
for taget[name[h]] in starred[name[process_tag_headers]] begin[:]
if compare[call[name[info]][constant[tag]] <ast.NotIn object at 0x7da2590d7190> call[name[self].process_tags][name[process]]] begin[:]
<ast.Try object at 0x7da1b02c4c10>
call[call[name[self].process_tags][name[process]]][call[name[info]][constant[tag]]] assign[=] dictionary[[<ast.Constant object at 0x7da1b02c72b0>, <ast.Constant object at 0x7da1b02c5660>], [<ast.Call object at 0x7da1b02c7610>, <ast.Name object at 0x7da1b02c6230>]]
if <ast.BoolOp object at 0x7da1b02c6050> begin[:]
if <ast.BoolOp object at 0x7da1b02c4280> begin[:]
call[call[call[name[self].process_tags][name[process]]][call[name[info]][constant[tag]]]][name[h]] assign[=] call[name[round], parameter[call[name[self]._size_coverter, parameter[call[name[info]][name[h]]]], constant[2]]]
if <ast.BoolOp object at 0x7da1b02b8940> begin[:]
call[call[name[self].processes][name[process]]][constant[cpus]] assign[=] call[name[info]][constant[cpus]]
if <ast.BoolOp object at 0x7da1b02bae30> begin[:]
<ast.Try object at 0x7da1b02b96c0>
if compare[call[name[info]][constant[hash]] in name[self].stored_ids] begin[:]
return[None]
if compare[constant[hash] in name[info]] begin[:]
variable[hs] assign[=] call[name[info]][constant[hash]]
call[name[info]][constant[work_dir]] assign[=] call[name[self]._expand_path, parameter[name[hs]]]
if compare[constant[tag] in name[info]] begin[:]
variable[tag] assign[=] call[name[info]][constant[tag]]
if <ast.BoolOp object at 0x7da1b0218b50> begin[:]
call[name[self].samples.append, parameter[name[tag]]]
call[call[name[self].trace_info][name[process]].append, parameter[name[info]]]
call[name[self].stored_ids.append, parameter[call[name[info]][constant[hash]]]] | keyword[def] identifier[_update_trace_info] ( identifier[self] , identifier[fields] , identifier[hm] ):
literal[string]
identifier[process] = identifier[fields] [ identifier[hm] [ literal[string] ]]
keyword[if] identifier[process] keyword[not] keyword[in] identifier[self] . identifier[processes] :
keyword[return]
identifier[info] = identifier[dict] (( identifier[column] , identifier[fields] [ identifier[pos] ]) keyword[for] identifier[column] , identifier[pos] keyword[in] identifier[hm] . identifier[items] ())
identifier[process_tag_headers] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[for] identifier[h] keyword[in] identifier[process_tag_headers] :
keyword[if] identifier[info] [ literal[string] ] keyword[not] keyword[in] identifier[self] . identifier[process_tags] [ identifier[process] ]:
keyword[try] :
identifier[timestart] = identifier[info] [ literal[string] ]. identifier[split] ()[ literal[int] ]
keyword[except] identifier[KeyError] :
identifier[timestart] = keyword[None]
identifier[self] . identifier[process_tags] [ identifier[process] ][ identifier[info] [ literal[string] ]]={
literal[string] : identifier[self] . identifier[_expand_path] ( identifier[info] [ literal[string] ]),
literal[string] : identifier[timestart]
}
keyword[if] identifier[h] keyword[in] identifier[info] keyword[and] identifier[info] [ literal[string] ]!= literal[string] :
keyword[if] identifier[h] != literal[string] keyword[and] identifier[info] [ identifier[h] ]!= literal[string] :
identifier[self] . identifier[process_tags] [ identifier[process] ][ identifier[info] [ literal[string] ]][ identifier[h] ]= identifier[round] ( identifier[self] . identifier[_size_coverter] ( identifier[info] [ identifier[h] ]), literal[int] )
keyword[else] :
identifier[self] . identifier[process_tags] [ identifier[process] ][ identifier[info] [ literal[string] ]][ identifier[h] ]= identifier[info] [ identifier[h] ]
keyword[if] literal[string] keyword[in] identifier[info] keyword[and] keyword[not] identifier[self] . identifier[processes] [ identifier[process] ][ literal[string] ]:
identifier[self] . identifier[processes] [ identifier[process] ][ literal[string] ]= identifier[info] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[info] keyword[and] keyword[not] identifier[self] . identifier[processes] [ identifier[process] ][ literal[string] ]:
keyword[try] :
identifier[self] . identifier[processes] [ identifier[process] ][ literal[string] ]= identifier[self] . identifier[_size_coverter] (
identifier[info] [ literal[string] ])
keyword[except] identifier[ValueError] :
identifier[self] . identifier[processes] [ identifier[process] ][ literal[string] ]= keyword[None]
keyword[if] identifier[info] [ literal[string] ] keyword[in] identifier[self] . identifier[stored_ids] :
keyword[return]
keyword[if] literal[string] keyword[in] identifier[info] :
identifier[hs] = identifier[info] [ literal[string] ]
identifier[info] [ literal[string] ]= identifier[self] . identifier[_expand_path] ( identifier[hs] )
keyword[if] literal[string] keyword[in] identifier[info] :
identifier[tag] = identifier[info] [ literal[string] ]
keyword[if] identifier[tag] != literal[string] keyword[and] identifier[tag] keyword[not] keyword[in] identifier[self] . identifier[samples] keyword[and] identifier[tag] . identifier[split] ()[ literal[int] ] keyword[not] keyword[in] identifier[self] . identifier[samples] :
identifier[self] . identifier[samples] . identifier[append] ( identifier[tag] )
identifier[self] . identifier[trace_info] [ identifier[process] ]. identifier[append] ( identifier[info] )
identifier[self] . identifier[stored_ids] . identifier[append] ( identifier[info] [ literal[string] ]) | def _update_trace_info(self, fields, hm):
"""Parses a trace line and updates the :attr:`status_info` attribute.
Parameters
----------
fields : list
List of the tab-seperated elements of the trace line
hm : dict
Maps the column IDs to their position in the fields argument.
This dictionary object is retrieve from :func:`_header_mapping`.
"""
process = fields[hm['process']]
if process not in self.processes:
return # depends on [control=['if'], data=[]]
# Get information from a single line of trace file
info = dict(((column, fields[pos]) for (column, pos) in hm.items()))
# The headers that will be used to populate the process
process_tag_headers = ['realtime', 'rss', 'rchar', 'wchar']
for h in process_tag_headers:
# In the rare occasion the tag is parsed first in the trace
# file than the log file, add the new tag.
if info['tag'] not in self.process_tags[process]:
# If the 'start' tag is present in the trace, use that
# information. If not, it will be parsed in the log file.
try:
timestart = info['start'].split()[1] # depends on [control=['try'], data=[]]
except KeyError:
timestart = None # depends on [control=['except'], data=[]]
self.process_tags[process][info['tag']] = {'workdir': self._expand_path(info['hash']), 'start': timestart} # depends on [control=['if'], data=[]]
if h in info and info['tag'] != '-':
if h != 'realtime' and info[h] != '-':
self.process_tags[process][info['tag']][h] = round(self._size_coverter(info[h]), 2) # depends on [control=['if'], data=[]]
else:
self.process_tags[process][info['tag']][h] = info[h] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['h']]
# Set allocated cpu and memory information to process
if 'cpus' in info and (not self.processes[process]['cpus']):
self.processes[process]['cpus'] = info['cpus'] # depends on [control=['if'], data=[]]
if 'memory' in info and (not self.processes[process]['memory']):
try:
self.processes[process]['memory'] = self._size_coverter(info['memory']) # depends on [control=['try'], data=[]]
except ValueError:
self.processes[process]['memory'] = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if info['hash'] in self.stored_ids:
return # depends on [control=['if'], data=[]]
# If the task hash code is provided, expand it to the work directory
# and add a new entry
if 'hash' in info:
hs = info['hash']
info['work_dir'] = self._expand_path(hs) # depends on [control=['if'], data=['info']]
if 'tag' in info:
tag = info['tag']
if tag != '-' and tag not in self.samples and (tag.split()[0] not in self.samples):
self.samples.append(tag) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['info']]
self.trace_info[process].append(info)
self.stored_ids.append(info['hash']) |
def _split_section_and_key(key):
"""Return a tuple with config section and key."""
parts = key.split('.')
if len(parts) > 1:
return 'renku "{0}"'.format(parts[0]), '.'.join(parts[1:])
return 'renku', key | def function[_split_section_and_key, parameter[key]]:
constant[Return a tuple with config section and key.]
variable[parts] assign[=] call[name[key].split, parameter[constant[.]]]
if compare[call[name[len], parameter[name[parts]]] greater[>] constant[1]] begin[:]
return[tuple[[<ast.Call object at 0x7da18dc9a5f0>, <ast.Call object at 0x7da18dc9bd60>]]]
return[tuple[[<ast.Constant object at 0x7da1b042dc30>, <ast.Name object at 0x7da1b042e6b0>]]] | keyword[def] identifier[_split_section_and_key] ( identifier[key] ):
literal[string]
identifier[parts] = identifier[key] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[parts] )> literal[int] :
keyword[return] literal[string] . identifier[format] ( identifier[parts] [ literal[int] ]), literal[string] . identifier[join] ( identifier[parts] [ literal[int] :])
keyword[return] literal[string] , identifier[key] | def _split_section_and_key(key):
"""Return a tuple with config section and key."""
parts = key.split('.')
if len(parts) > 1:
return ('renku "{0}"'.format(parts[0]), '.'.join(parts[1:])) # depends on [control=['if'], data=[]]
return ('renku', key) |
def accuracy(sess, model, x, y, batch_size=None, devices=None, feed=None,
             attack=None, attack_params=None):
    """
    Compute the accuracy of a TF model on some data.

    :param sess: TF session used to run the evaluation graph
    :param model: cleverhans.model.Model instance
    :param x: numpy array of input examples (e.g. MNIST().x_test)
    :param y: numpy array of example labels (e.g. MNIST().y_test)
    :param batch_size: optional number of examples per evaluation batch.
        When omitted, a reasonable default is guessed and may run out of
        memory. The batch is divided up evenly among available devices,
        so with 128 examples fitting on one GPU and 8 GPUs available, a
        batch size of 1024 is a sensible starting point.
    :param devices: optional list of device name strings. Defaults to
        all visible GPUs.
    :param feed: optional dictionary appended to the feed dictionary
        before the session runs (e.g. to set a Keras learning phase).
    :param attack: optional cleverhans.attack.Attack. When given, the
        model is evaluated on adversarial examples produced by the
        attack instead of clean data.
    :param attack_params: optional dictionary passed to attack.generate
        as keyword arguments.
    :return: a float with the accuracy value
    """
    _check_x(x)
    _check_y(y)
    if x.shape[0] != y.shape[0]:
        raise ValueError("Number of input examples and labels do not match.")

    # Evaluate per-example correctness in parallel across devices, then
    # average the indicator values to obtain the accuracy.
    factory = _CorrectFactory(model, attack, attack_params)
    (correct,) = batch_eval_multi_worker(sess, factory, [x, y],
                                         batch_size=batch_size,
                                         devices=devices, feed=feed)
    return correct.mean()
constant[
Compute the accuracy of a TF model on some data
:param sess: TF session to use when training the graph
:param model: cleverhans.model.Model instance
:param x: numpy array containing input examples (e.g. MNIST().x_test )
:param y: numpy array containing example labels (e.g. MNIST().y_test )
:param batch_size: Number of examples to use in a single evaluation batch.
If not specified, this function will use a reasonable guess and
may run out of memory.
When choosing the batch size, keep in mind that the batch will
be divided up evenly among available devices. If you can fit 128
examples in memory on one GPU and you have 8 GPUs, you probably
want to use a batch size of 1024 (unless a different batch size
runs faster with the ops you are using, etc.)
:param devices: An optional list of string device names to use.
If not specified, this function will use all visible GPUs.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param attack: cleverhans.attack.Attack
Optional. If no attack specified, evaluates the model on clean data.
If attack is specified, evaluates the model on adversarial examples
created by the attack.
:param attack_params: dictionary
If attack is specified, this dictionary is passed to attack.generate
as keyword arguments.
:return: a float with the accuracy value
]
call[name[_check_x], parameter[name[x]]]
call[name[_check_y], parameter[name[y]]]
if compare[call[name[x].shape][constant[0]] not_equal[!=] call[name[y].shape][constant[0]]] begin[:]
<ast.Raise object at 0x7da20c6e46a0>
variable[factory] assign[=] call[name[_CorrectFactory], parameter[name[model], name[attack], name[attack_params]]]
<ast.Tuple object at 0x7da20c6e6bf0> assign[=] call[name[batch_eval_multi_worker], parameter[name[sess], name[factory], list[[<ast.Name object at 0x7da20c6e4d90>, <ast.Name object at 0x7da1b1f75cc0>]]]]
return[call[name[correct].mean, parameter[]]] | keyword[def] identifier[accuracy] ( identifier[sess] , identifier[model] , identifier[x] , identifier[y] , identifier[batch_size] = keyword[None] , identifier[devices] = keyword[None] , identifier[feed] = keyword[None] ,
identifier[attack] = keyword[None] , identifier[attack_params] = keyword[None] ):
literal[string]
identifier[_check_x] ( identifier[x] )
identifier[_check_y] ( identifier[y] )
keyword[if] identifier[x] . identifier[shape] [ literal[int] ]!= identifier[y] . identifier[shape] [ literal[int] ]:
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[factory] = identifier[_CorrectFactory] ( identifier[model] , identifier[attack] , identifier[attack_params] )
identifier[correct] ,= identifier[batch_eval_multi_worker] ( identifier[sess] , identifier[factory] ,[ identifier[x] , identifier[y] ],
identifier[batch_size] = identifier[batch_size] , identifier[devices] = identifier[devices] ,
identifier[feed] = identifier[feed] )
keyword[return] identifier[correct] . identifier[mean] () | def accuracy(sess, model, x, y, batch_size=None, devices=None, feed=None, attack=None, attack_params=None):
"""
Compute the accuracy of a TF model on some data
:param sess: TF session to use when training the graph
:param model: cleverhans.model.Model instance
:param x: numpy array containing input examples (e.g. MNIST().x_test )
:param y: numpy array containing example labels (e.g. MNIST().y_test )
:param batch_size: Number of examples to use in a single evaluation batch.
If not specified, this function will use a reasonable guess and
may run out of memory.
When choosing the batch size, keep in mind that the batch will
be divided up evenly among available devices. If you can fit 128
examples in memory on one GPU and you have 8 GPUs, you probably
want to use a batch size of 1024 (unless a different batch size
runs faster with the ops you are using, etc.)
:param devices: An optional list of string device names to use.
If not specified, this function will use all visible GPUs.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param attack: cleverhans.attack.Attack
Optional. If no attack specified, evaluates the model on clean data.
If attack is specified, evaluates the model on adversarial examples
created by the attack.
:param attack_params: dictionary
If attack is specified, this dictionary is passed to attack.generate
as keyword arguments.
:return: a float with the accuracy value
"""
_check_x(x)
_check_y(y)
if x.shape[0] != y.shape[0]:
raise ValueError('Number of input examples and labels do not match.') # depends on [control=['if'], data=[]]
factory = _CorrectFactory(model, attack, attack_params)
(correct,) = batch_eval_multi_worker(sess, factory, [x, y], batch_size=batch_size, devices=devices, feed=feed)
return correct.mean() |
def _next_code_point_handler(whence, ctx):
"""Retrieves the next code point from within a quoted string or symbol."""
data_event, self = yield
queue = ctx.queue
unicode_escapes_allowed = ctx.ion_type is not IonType.CLOB
escaped_newline = False
escape_sequence = b''
low_surrogate_required = False
while True:
if len(queue) == 0:
yield ctx.read_data_event(self)
queue_iter = iter(queue)
code_point_generator = _next_code_point_iter(queue, queue_iter)
code_point = next(code_point_generator)
if code_point == _BACKSLASH:
escape_sequence += six.int2byte(_BACKSLASH)
num_digits = None
while True:
if len(queue) == 0:
yield ctx.read_data_event(self)
code_point = next(queue_iter)
if six.indexbytes(escape_sequence, -1) == _BACKSLASH:
if code_point == _ord(b'u') and unicode_escapes_allowed:
# 4-digit unicode escapes, plus '\u' for each surrogate
num_digits = 12 if low_surrogate_required else 6
low_surrogate_required = False
elif low_surrogate_required:
_illegal_character(code_point, ctx,
'Unpaired high surrogate escape sequence %s.' % (escape_sequence,))
elif code_point == _ord(b'x'):
num_digits = 4 # 2-digit hex escapes
elif code_point == _ord(b'U') and unicode_escapes_allowed:
num_digits = 10 # 8-digit unicode escapes
elif code_point in _COMMON_ESCAPES:
if code_point == _SLASH or code_point == _QUESTION_MARK:
escape_sequence = b'' # Drop the \. Python does not recognize these as escapes.
escape_sequence += six.int2byte(code_point)
break
elif code_point in _NEWLINES:
escaped_newline = True
break
else:
# This is a backslash followed by an invalid escape character. This is illegal.
_illegal_character(code_point, ctx, 'Invalid escape sequence \\%s.' % (_chr(code_point),))
escape_sequence += six.int2byte(code_point)
else:
if code_point not in _HEX_DIGITS:
_illegal_character(code_point, ctx,
'Non-hex character %s found in unicode escape.' % (_chr(code_point),))
escape_sequence += six.int2byte(code_point)
if len(escape_sequence) == num_digits:
break
if not escaped_newline:
decoded_escape_sequence = escape_sequence.decode('unicode-escape')
cp_iter = _next_code_point_iter(decoded_escape_sequence, iter(decoded_escape_sequence), to_int=ord)
code_point = next(cp_iter)
if code_point is None:
# This is a high surrogate. Restart the loop to gather the low surrogate.
low_surrogate_required = True
continue
code_point = CodePoint(code_point)
code_point.char = decoded_escape_sequence
code_point.is_escaped = True
ctx.set_code_point(code_point)
yield Transition(None, whence)
elif low_surrogate_required:
_illegal_character(code_point, ctx, 'Unpaired high surrogate escape sequence %s.' % (escape_sequence,))
if code_point == _CARRIAGE_RETURN:
# Normalize all newlines (\r, \n, and \r\n) to \n .
if len(queue) == 0:
yield ctx.read_data_event(self)
code_point = next(queue_iter)
if code_point != _NEWLINE:
queue.unread(code_point)
code_point = _NEWLINE
while code_point is None:
yield ctx.read_data_event(self)
code_point = next(code_point_generator)
if escaped_newline:
code_point = CodePoint(code_point)
code_point.char = _ESCAPED_NEWLINE
code_point.is_escaped = True
ctx.set_code_point(code_point)
yield Transition(None, whence) | def function[_next_code_point_handler, parameter[whence, ctx]]:
constant[Retrieves the next code point from within a quoted string or symbol.]
<ast.Tuple object at 0x7da1b13c3190> assign[=] <ast.Yield object at 0x7da1b13c2350>
variable[queue] assign[=] name[ctx].queue
variable[unicode_escapes_allowed] assign[=] compare[name[ctx].ion_type is_not name[IonType].CLOB]
variable[escaped_newline] assign[=] constant[False]
variable[escape_sequence] assign[=] constant[b'']
variable[low_surrogate_required] assign[=] constant[False]
while constant[True] begin[:]
if compare[call[name[len], parameter[name[queue]]] equal[==] constant[0]] begin[:]
<ast.Yield object at 0x7da1b13c19c0>
variable[queue_iter] assign[=] call[name[iter], parameter[name[queue]]]
variable[code_point_generator] assign[=] call[name[_next_code_point_iter], parameter[name[queue], name[queue_iter]]]
variable[code_point] assign[=] call[name[next], parameter[name[code_point_generator]]]
if compare[name[code_point] equal[==] name[_BACKSLASH]] begin[:]
<ast.AugAssign object at 0x7da1b1304c70>
variable[num_digits] assign[=] constant[None]
while constant[True] begin[:]
if compare[call[name[len], parameter[name[queue]]] equal[==] constant[0]] begin[:]
<ast.Yield object at 0x7da1b1307c70>
variable[code_point] assign[=] call[name[next], parameter[name[queue_iter]]]
if compare[call[name[six].indexbytes, parameter[name[escape_sequence], <ast.UnaryOp object at 0x7da1b13055a0>]] equal[==] name[_BACKSLASH]] begin[:]
if <ast.BoolOp object at 0x7da18f00e1a0> begin[:]
variable[num_digits] assign[=] <ast.IfExp object at 0x7da18f00db40>
variable[low_surrogate_required] assign[=] constant[False]
<ast.AugAssign object at 0x7da1b1549900>
if <ast.UnaryOp object at 0x7da1b1548e50> begin[:]
variable[decoded_escape_sequence] assign[=] call[name[escape_sequence].decode, parameter[constant[unicode-escape]]]
variable[cp_iter] assign[=] call[name[_next_code_point_iter], parameter[name[decoded_escape_sequence], call[name[iter], parameter[name[decoded_escape_sequence]]]]]
variable[code_point] assign[=] call[name[next], parameter[name[cp_iter]]]
if compare[name[code_point] is constant[None]] begin[:]
variable[low_surrogate_required] assign[=] constant[True]
continue
variable[code_point] assign[=] call[name[CodePoint], parameter[name[code_point]]]
name[code_point].char assign[=] name[decoded_escape_sequence]
name[code_point].is_escaped assign[=] constant[True]
call[name[ctx].set_code_point, parameter[name[code_point]]]
<ast.Yield object at 0x7da1b154a350>
if compare[name[code_point] equal[==] name[_CARRIAGE_RETURN]] begin[:]
if compare[call[name[len], parameter[name[queue]]] equal[==] constant[0]] begin[:]
<ast.Yield object at 0x7da207f039a0>
variable[code_point] assign[=] call[name[next], parameter[name[queue_iter]]]
if compare[name[code_point] not_equal[!=] name[_NEWLINE]] begin[:]
call[name[queue].unread, parameter[name[code_point]]]
variable[code_point] assign[=] name[_NEWLINE]
while compare[name[code_point] is constant[None]] begin[:]
<ast.Yield object at 0x7da207f03f40>
variable[code_point] assign[=] call[name[next], parameter[name[code_point_generator]]]
if name[escaped_newline] begin[:]
variable[code_point] assign[=] call[name[CodePoint], parameter[name[code_point]]]
name[code_point].char assign[=] name[_ESCAPED_NEWLINE]
name[code_point].is_escaped assign[=] constant[True]
call[name[ctx].set_code_point, parameter[name[code_point]]]
<ast.Yield object at 0x7da1b157e8f0> | keyword[def] identifier[_next_code_point_handler] ( identifier[whence] , identifier[ctx] ):
literal[string]
identifier[data_event] , identifier[self] = keyword[yield]
identifier[queue] = identifier[ctx] . identifier[queue]
identifier[unicode_escapes_allowed] = identifier[ctx] . identifier[ion_type] keyword[is] keyword[not] identifier[IonType] . identifier[CLOB]
identifier[escaped_newline] = keyword[False]
identifier[escape_sequence] = literal[string]
identifier[low_surrogate_required] = keyword[False]
keyword[while] keyword[True] :
keyword[if] identifier[len] ( identifier[queue] )== literal[int] :
keyword[yield] identifier[ctx] . identifier[read_data_event] ( identifier[self] )
identifier[queue_iter] = identifier[iter] ( identifier[queue] )
identifier[code_point_generator] = identifier[_next_code_point_iter] ( identifier[queue] , identifier[queue_iter] )
identifier[code_point] = identifier[next] ( identifier[code_point_generator] )
keyword[if] identifier[code_point] == identifier[_BACKSLASH] :
identifier[escape_sequence] += identifier[six] . identifier[int2byte] ( identifier[_BACKSLASH] )
identifier[num_digits] = keyword[None]
keyword[while] keyword[True] :
keyword[if] identifier[len] ( identifier[queue] )== literal[int] :
keyword[yield] identifier[ctx] . identifier[read_data_event] ( identifier[self] )
identifier[code_point] = identifier[next] ( identifier[queue_iter] )
keyword[if] identifier[six] . identifier[indexbytes] ( identifier[escape_sequence] ,- literal[int] )== identifier[_BACKSLASH] :
keyword[if] identifier[code_point] == identifier[_ord] ( literal[string] ) keyword[and] identifier[unicode_escapes_allowed] :
identifier[num_digits] = literal[int] keyword[if] identifier[low_surrogate_required] keyword[else] literal[int]
identifier[low_surrogate_required] = keyword[False]
keyword[elif] identifier[low_surrogate_required] :
identifier[_illegal_character] ( identifier[code_point] , identifier[ctx] ,
literal[string] %( identifier[escape_sequence] ,))
keyword[elif] identifier[code_point] == identifier[_ord] ( literal[string] ):
identifier[num_digits] = literal[int]
keyword[elif] identifier[code_point] == identifier[_ord] ( literal[string] ) keyword[and] identifier[unicode_escapes_allowed] :
identifier[num_digits] = literal[int]
keyword[elif] identifier[code_point] keyword[in] identifier[_COMMON_ESCAPES] :
keyword[if] identifier[code_point] == identifier[_SLASH] keyword[or] identifier[code_point] == identifier[_QUESTION_MARK] :
identifier[escape_sequence] = literal[string]
identifier[escape_sequence] += identifier[six] . identifier[int2byte] ( identifier[code_point] )
keyword[break]
keyword[elif] identifier[code_point] keyword[in] identifier[_NEWLINES] :
identifier[escaped_newline] = keyword[True]
keyword[break]
keyword[else] :
identifier[_illegal_character] ( identifier[code_point] , identifier[ctx] , literal[string] %( identifier[_chr] ( identifier[code_point] ),))
identifier[escape_sequence] += identifier[six] . identifier[int2byte] ( identifier[code_point] )
keyword[else] :
keyword[if] identifier[code_point] keyword[not] keyword[in] identifier[_HEX_DIGITS] :
identifier[_illegal_character] ( identifier[code_point] , identifier[ctx] ,
literal[string] %( identifier[_chr] ( identifier[code_point] ),))
identifier[escape_sequence] += identifier[six] . identifier[int2byte] ( identifier[code_point] )
keyword[if] identifier[len] ( identifier[escape_sequence] )== identifier[num_digits] :
keyword[break]
keyword[if] keyword[not] identifier[escaped_newline] :
identifier[decoded_escape_sequence] = identifier[escape_sequence] . identifier[decode] ( literal[string] )
identifier[cp_iter] = identifier[_next_code_point_iter] ( identifier[decoded_escape_sequence] , identifier[iter] ( identifier[decoded_escape_sequence] ), identifier[to_int] = identifier[ord] )
identifier[code_point] = identifier[next] ( identifier[cp_iter] )
keyword[if] identifier[code_point] keyword[is] keyword[None] :
identifier[low_surrogate_required] = keyword[True]
keyword[continue]
identifier[code_point] = identifier[CodePoint] ( identifier[code_point] )
identifier[code_point] . identifier[char] = identifier[decoded_escape_sequence]
identifier[code_point] . identifier[is_escaped] = keyword[True]
identifier[ctx] . identifier[set_code_point] ( identifier[code_point] )
keyword[yield] identifier[Transition] ( keyword[None] , identifier[whence] )
keyword[elif] identifier[low_surrogate_required] :
identifier[_illegal_character] ( identifier[code_point] , identifier[ctx] , literal[string] %( identifier[escape_sequence] ,))
keyword[if] identifier[code_point] == identifier[_CARRIAGE_RETURN] :
keyword[if] identifier[len] ( identifier[queue] )== literal[int] :
keyword[yield] identifier[ctx] . identifier[read_data_event] ( identifier[self] )
identifier[code_point] = identifier[next] ( identifier[queue_iter] )
keyword[if] identifier[code_point] != identifier[_NEWLINE] :
identifier[queue] . identifier[unread] ( identifier[code_point] )
identifier[code_point] = identifier[_NEWLINE]
keyword[while] identifier[code_point] keyword[is] keyword[None] :
keyword[yield] identifier[ctx] . identifier[read_data_event] ( identifier[self] )
identifier[code_point] = identifier[next] ( identifier[code_point_generator] )
keyword[if] identifier[escaped_newline] :
identifier[code_point] = identifier[CodePoint] ( identifier[code_point] )
identifier[code_point] . identifier[char] = identifier[_ESCAPED_NEWLINE]
identifier[code_point] . identifier[is_escaped] = keyword[True]
identifier[ctx] . identifier[set_code_point] ( identifier[code_point] )
keyword[yield] identifier[Transition] ( keyword[None] , identifier[whence] ) | def _next_code_point_handler(whence, ctx):
"""Retrieves the next code point from within a quoted string or symbol."""
(data_event, self) = (yield)
queue = ctx.queue
unicode_escapes_allowed = ctx.ion_type is not IonType.CLOB
escaped_newline = False
escape_sequence = b''
low_surrogate_required = False
while True:
if len(queue) == 0:
yield ctx.read_data_event(self) # depends on [control=['if'], data=[]]
queue_iter = iter(queue)
code_point_generator = _next_code_point_iter(queue, queue_iter)
code_point = next(code_point_generator)
if code_point == _BACKSLASH:
escape_sequence += six.int2byte(_BACKSLASH)
num_digits = None
while True:
if len(queue) == 0:
yield ctx.read_data_event(self) # depends on [control=['if'], data=[]]
code_point = next(queue_iter)
if six.indexbytes(escape_sequence, -1) == _BACKSLASH:
if code_point == _ord(b'u') and unicode_escapes_allowed:
# 4-digit unicode escapes, plus '\u' for each surrogate
num_digits = 12 if low_surrogate_required else 6
low_surrogate_required = False # depends on [control=['if'], data=[]]
elif low_surrogate_required:
_illegal_character(code_point, ctx, 'Unpaired high surrogate escape sequence %s.' % (escape_sequence,)) # depends on [control=['if'], data=[]]
elif code_point == _ord(b'x'):
num_digits = 4 # 2-digit hex escapes # depends on [control=['if'], data=[]]
elif code_point == _ord(b'U') and unicode_escapes_allowed:
num_digits = 10 # 8-digit unicode escapes # depends on [control=['if'], data=[]]
elif code_point in _COMMON_ESCAPES:
if code_point == _SLASH or code_point == _QUESTION_MARK:
escape_sequence = b'' # Drop the \. Python does not recognize these as escapes. # depends on [control=['if'], data=[]]
escape_sequence += six.int2byte(code_point)
break # depends on [control=['if'], data=['code_point']]
elif code_point in _NEWLINES:
escaped_newline = True
break # depends on [control=['if'], data=[]]
else:
# This is a backslash followed by an invalid escape character. This is illegal.
_illegal_character(code_point, ctx, 'Invalid escape sequence \\%s.' % (_chr(code_point),))
escape_sequence += six.int2byte(code_point) # depends on [control=['if'], data=[]]
else:
if code_point not in _HEX_DIGITS:
_illegal_character(code_point, ctx, 'Non-hex character %s found in unicode escape.' % (_chr(code_point),)) # depends on [control=['if'], data=['code_point']]
escape_sequence += six.int2byte(code_point)
if len(escape_sequence) == num_digits:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
if not escaped_newline:
decoded_escape_sequence = escape_sequence.decode('unicode-escape')
cp_iter = _next_code_point_iter(decoded_escape_sequence, iter(decoded_escape_sequence), to_int=ord)
code_point = next(cp_iter)
if code_point is None:
# This is a high surrogate. Restart the loop to gather the low surrogate.
low_surrogate_required = True
continue # depends on [control=['if'], data=[]]
code_point = CodePoint(code_point)
code_point.char = decoded_escape_sequence
code_point.is_escaped = True
ctx.set_code_point(code_point)
yield Transition(None, whence) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['code_point', '_BACKSLASH']]
elif low_surrogate_required:
_illegal_character(code_point, ctx, 'Unpaired high surrogate escape sequence %s.' % (escape_sequence,)) # depends on [control=['if'], data=[]]
if code_point == _CARRIAGE_RETURN:
# Normalize all newlines (\r, \n, and \r\n) to \n .
if len(queue) == 0:
yield ctx.read_data_event(self) # depends on [control=['if'], data=[]]
code_point = next(queue_iter)
if code_point != _NEWLINE:
queue.unread(code_point)
code_point = _NEWLINE # depends on [control=['if'], data=['code_point', '_NEWLINE']] # depends on [control=['if'], data=['code_point']]
while code_point is None:
yield ctx.read_data_event(self)
code_point = next(code_point_generator) # depends on [control=['while'], data=['code_point']]
if escaped_newline:
code_point = CodePoint(code_point)
code_point.char = _ESCAPED_NEWLINE
code_point.is_escaped = True # depends on [control=['if'], data=[]]
ctx.set_code_point(code_point)
yield Transition(None, whence) # depends on [control=['while'], data=[]] |
def cookies(self):
"""A :class:`dict` with the contents of all cookies transmitted with
the request."""
return parse_cookie(
self.environ,
self.charset,
self.encoding_errors,
cls=self.dict_storage_class,
) | def function[cookies, parameter[self]]:
constant[A :class:`dict` with the contents of all cookies transmitted with
the request.]
return[call[name[parse_cookie], parameter[name[self].environ, name[self].charset, name[self].encoding_errors]]] | keyword[def] identifier[cookies] ( identifier[self] ):
literal[string]
keyword[return] identifier[parse_cookie] (
identifier[self] . identifier[environ] ,
identifier[self] . identifier[charset] ,
identifier[self] . identifier[encoding_errors] ,
identifier[cls] = identifier[self] . identifier[dict_storage_class] ,
) | def cookies(self):
"""A :class:`dict` with the contents of all cookies transmitted with
the request."""
return parse_cookie(self.environ, self.charset, self.encoding_errors, cls=self.dict_storage_class) |
def dfs_get_all_childs(self, root):
"""Recursively get all sons of this node
:param root: node to get sons
:type root:
:return: sons
:rtype: list
"""
self.nodes[root]['dfs_loop_status'] = 'DFS_CHECKED'
ret = set()
# Me
ret.add(root)
# And my sons
ret.update(self.nodes[root]['sons'])
for child in self.nodes[root]['sons']:
# I just don't care about already checked children
if self.nodes[child]['dfs_loop_status'] == 'DFS_UNCHECKED':
ret.update(self.dfs_get_all_childs(child))
return list(ret) | def function[dfs_get_all_childs, parameter[self, root]]:
constant[Recursively get all sons of this node
:param root: node to get sons
:type root:
:return: sons
:rtype: list
]
call[call[name[self].nodes][name[root]]][constant[dfs_loop_status]] assign[=] constant[DFS_CHECKED]
variable[ret] assign[=] call[name[set], parameter[]]
call[name[ret].add, parameter[name[root]]]
call[name[ret].update, parameter[call[call[name[self].nodes][name[root]]][constant[sons]]]]
for taget[name[child]] in starred[call[call[name[self].nodes][name[root]]][constant[sons]]] begin[:]
if compare[call[call[name[self].nodes][name[child]]][constant[dfs_loop_status]] equal[==] constant[DFS_UNCHECKED]] begin[:]
call[name[ret].update, parameter[call[name[self].dfs_get_all_childs, parameter[name[child]]]]]
return[call[name[list], parameter[name[ret]]]] | keyword[def] identifier[dfs_get_all_childs] ( identifier[self] , identifier[root] ):
literal[string]
identifier[self] . identifier[nodes] [ identifier[root] ][ literal[string] ]= literal[string]
identifier[ret] = identifier[set] ()
identifier[ret] . identifier[add] ( identifier[root] )
identifier[ret] . identifier[update] ( identifier[self] . identifier[nodes] [ identifier[root] ][ literal[string] ])
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[nodes] [ identifier[root] ][ literal[string] ]:
keyword[if] identifier[self] . identifier[nodes] [ identifier[child] ][ literal[string] ]== literal[string] :
identifier[ret] . identifier[update] ( identifier[self] . identifier[dfs_get_all_childs] ( identifier[child] ))
keyword[return] identifier[list] ( identifier[ret] ) | def dfs_get_all_childs(self, root):
"""Recursively get all sons of this node
:param root: node to get sons
:type root:
:return: sons
:rtype: list
"""
self.nodes[root]['dfs_loop_status'] = 'DFS_CHECKED'
ret = set()
# Me
ret.add(root)
# And my sons
ret.update(self.nodes[root]['sons'])
for child in self.nodes[root]['sons']:
# I just don't care about already checked children
if self.nodes[child]['dfs_loop_status'] == 'DFS_UNCHECKED':
ret.update(self.dfs_get_all_childs(child)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']]
return list(ret) |
def _add_container(props_node, tag, container):
"""
Walks through the given container and fills the node
:param props_node: A property node
:param tag: Name of the container tag
:param container: The container
"""
values_node = ElementTree.SubElement(props_node, tag)
for value in container:
value_node = ElementTree.SubElement(values_node, TAG_VALUE)
value_node.text = str(value) | def function[_add_container, parameter[props_node, tag, container]]:
constant[
Walks through the given container and fills the node
:param props_node: A property node
:param tag: Name of the container tag
:param container: The container
]
variable[values_node] assign[=] call[name[ElementTree].SubElement, parameter[name[props_node], name[tag]]]
for taget[name[value]] in starred[name[container]] begin[:]
variable[value_node] assign[=] call[name[ElementTree].SubElement, parameter[name[values_node], name[TAG_VALUE]]]
name[value_node].text assign[=] call[name[str], parameter[name[value]]] | keyword[def] identifier[_add_container] ( identifier[props_node] , identifier[tag] , identifier[container] ):
literal[string]
identifier[values_node] = identifier[ElementTree] . identifier[SubElement] ( identifier[props_node] , identifier[tag] )
keyword[for] identifier[value] keyword[in] identifier[container] :
identifier[value_node] = identifier[ElementTree] . identifier[SubElement] ( identifier[values_node] , identifier[TAG_VALUE] )
identifier[value_node] . identifier[text] = identifier[str] ( identifier[value] ) | def _add_container(props_node, tag, container):
"""
Walks through the given container and fills the node
:param props_node: A property node
:param tag: Name of the container tag
:param container: The container
"""
values_node = ElementTree.SubElement(props_node, tag)
for value in container:
value_node = ElementTree.SubElement(values_node, TAG_VALUE)
value_node.text = str(value) # depends on [control=['for'], data=['value']] |
def _merge_data(self, data: AnyMapping, *additional: AnyMapping) -> dict:
r"""Merge base data and additional dicts.
:param data: Base data.
:param \*additional: Additional data dicts to be merged into base dict.
"""
return defaults(
dict(data) if not isinstance(data, dict) else data,
*(dict(item) for item in additional)) | def function[_merge_data, parameter[self, data]]:
constant[Merge base data and additional dicts.
:param data: Base data.
:param \*additional: Additional data dicts to be merged into base dict.
]
return[call[name[defaults], parameter[<ast.IfExp object at 0x7da1b1a3e830>, <ast.Starred object at 0x7da1b1a3efe0>]]] | keyword[def] identifier[_merge_data] ( identifier[self] , identifier[data] : identifier[AnyMapping] ,* identifier[additional] : identifier[AnyMapping] )-> identifier[dict] :
literal[string]
keyword[return] identifier[defaults] (
identifier[dict] ( identifier[data] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[dict] ) keyword[else] identifier[data] ,
*( identifier[dict] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[additional] )) | def _merge_data(self, data: AnyMapping, *additional: AnyMapping) -> dict:
"""Merge base data and additional dicts.
:param data: Base data.
:param \\*additional: Additional data dicts to be merged into base dict.
"""
return defaults(dict(data) if not isinstance(data, dict) else data, *(dict(item) for item in additional)) |
def tuple_len(self):
"""
Length of tuples produced by this generator.
"""
try:
return self._tuple_len
except AttributeError:
raise NotImplementedError("Class {} does not implement attribute 'tuple_len'.".format(self.__class__.__name__)) | def function[tuple_len, parameter[self]]:
constant[
Length of tuples produced by this generator.
]
<ast.Try object at 0x7da1b10a4640> | keyword[def] identifier[tuple_len] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[_tuple_len]
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[self] . identifier[__class__] . identifier[__name__] )) | def tuple_len(self):
"""
Length of tuples produced by this generator.
"""
try:
return self._tuple_len # depends on [control=['try'], data=[]]
except AttributeError:
raise NotImplementedError("Class {} does not implement attribute 'tuple_len'.".format(self.__class__.__name__)) # depends on [control=['except'], data=[]] |
def args_repr(*args, **kwargs):
"""
Returns human-readable string representation of both positional and
keyword arguments passed to the function.
This function uses the built-in :func:`repr()` function to convert
individual arguments to string.
>>> args_repr("a", (1, 2), some_keyword = list("abc"))
"'a', (1, 2), some_keyword = ['a', 'b', 'c']"
"""
items = [repr(a) for a in args]
items += ["%s = %r" % (k, v) for k, v in iter(kwargs.items())]
return ", ".join(items) | def function[args_repr, parameter[]]:
constant[
Returns human-readable string representation of both positional and
keyword arguments passed to the function.
This function uses the built-in :func:`repr()` function to convert
individual arguments to string.
>>> args_repr("a", (1, 2), some_keyword = list("abc"))
"'a', (1, 2), some_keyword = ['a', 'b', 'c']"
]
variable[items] assign[=] <ast.ListComp object at 0x7da20c6c5e70>
<ast.AugAssign object at 0x7da20c6c4820>
return[call[constant[, ].join, parameter[name[items]]]] | keyword[def] identifier[args_repr] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[items] =[ identifier[repr] ( identifier[a] ) keyword[for] identifier[a] keyword[in] identifier[args] ]
identifier[items] +=[ literal[string] %( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[iter] ( identifier[kwargs] . identifier[items] ())]
keyword[return] literal[string] . identifier[join] ( identifier[items] ) | def args_repr(*args, **kwargs):
"""
Returns human-readable string representation of both positional and
keyword arguments passed to the function.
This function uses the built-in :func:`repr()` function to convert
individual arguments to string.
>>> args_repr("a", (1, 2), some_keyword = list("abc"))
"'a', (1, 2), some_keyword = ['a', 'b', 'c']"
"""
items = [repr(a) for a in args]
items += ['%s = %r' % (k, v) for (k, v) in iter(kwargs.items())]
return ', '.join(items) |
def averageSequenceAccuracy(self, minOverlap, maxOverlap,
firstStat=0, lastStat=None):
"""
For each object, decide whether the TM uniquely classified it by checking
that the number of predictedActive cells are in an acceptable range.
"""
numCorrectSparsity = 0.0
numCorrectClassifications = 0.0
numStats = 0.0
# For each object or sequence we classify every point or element
#
# A sequence element is considered correctly classified only if the number
# of predictedActive cells is within a reasonable range and if the KNN
# Classifier correctly classifies the active cell representation as
# belonging to this sequence.
#
# A point on an object is considered correctly classified by the TM if the
# number of predictedActive cells is within range.
for stats in self.statistics[firstStat:lastStat]:
# Keep running total of how often the number of predictedActive cells are
# in the range. We always skip the first (unpredictable) count.
predictedActiveStat = stats["TM PredictedActive C0"][1:]
TMRepresentationStat = stats["TM Full Representation C0"][1:]
# print "\n-----------"
# print stats["object"], predictedActiveStat
for numCells,sdr in zip(predictedActiveStat, TMRepresentationStat):
numStats += 1.0
# print "numCells: ", numCells
if numCells in range(minOverlap, maxOverlap + 1):
numCorrectSparsity += 1.0
# Check KNN Classifier
sdr = list(sdr)
sdr.sort()
dense = numpy.zeros(self.numTMCells)
dense[sdr] = 1.0
(winner, inferenceResult, dist, categoryDist) = \
self.classifier.infer(dense)
# print sdr, winner, stats['object'], winner == stats['object']
# print categoryDist
# print
if winner == stats['object']:
numCorrectClassifications += 1.0
if numStats==0:
return 0.0, 0.0
return ((numCorrectSparsity / numStats),
(numCorrectClassifications / numStats) ) | def function[averageSequenceAccuracy, parameter[self, minOverlap, maxOverlap, firstStat, lastStat]]:
constant[
For each object, decide whether the TM uniquely classified it by checking
that the number of predictedActive cells are in an acceptable range.
]
variable[numCorrectSparsity] assign[=] constant[0.0]
variable[numCorrectClassifications] assign[=] constant[0.0]
variable[numStats] assign[=] constant[0.0]
for taget[name[stats]] in starred[call[name[self].statistics][<ast.Slice object at 0x7da1b086eef0>]] begin[:]
variable[predictedActiveStat] assign[=] call[call[name[stats]][constant[TM PredictedActive C0]]][<ast.Slice object at 0x7da1b086d0f0>]
variable[TMRepresentationStat] assign[=] call[call[name[stats]][constant[TM Full Representation C0]]][<ast.Slice object at 0x7da1b086d5d0>]
for taget[tuple[[<ast.Name object at 0x7da1b086d450>, <ast.Name object at 0x7da1b086edd0>]]] in starred[call[name[zip], parameter[name[predictedActiveStat], name[TMRepresentationStat]]]] begin[:]
<ast.AugAssign object at 0x7da1b086c2e0>
if compare[name[numCells] in call[name[range], parameter[name[minOverlap], binary_operation[name[maxOverlap] + constant[1]]]]] begin[:]
<ast.AugAssign object at 0x7da1b08b89a0>
variable[sdr] assign[=] call[name[list], parameter[name[sdr]]]
call[name[sdr].sort, parameter[]]
variable[dense] assign[=] call[name[numpy].zeros, parameter[name[self].numTMCells]]
call[name[dense]][name[sdr]] assign[=] constant[1.0]
<ast.Tuple object at 0x7da1b08bbf10> assign[=] call[name[self].classifier.infer, parameter[name[dense]]]
if compare[name[winner] equal[==] call[name[stats]][constant[object]]] begin[:]
<ast.AugAssign object at 0x7da1b08b97e0>
if compare[name[numStats] equal[==] constant[0]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b085fb50>, <ast.Constant object at 0x7da1b085fb20>]]]
return[tuple[[<ast.BinOp object at 0x7da1b085ecb0>, <ast.BinOp object at 0x7da1b085fbe0>]]] | keyword[def] identifier[averageSequenceAccuracy] ( identifier[self] , identifier[minOverlap] , identifier[maxOverlap] ,
identifier[firstStat] = literal[int] , identifier[lastStat] = keyword[None] ):
literal[string]
identifier[numCorrectSparsity] = literal[int]
identifier[numCorrectClassifications] = literal[int]
identifier[numStats] = literal[int]
keyword[for] identifier[stats] keyword[in] identifier[self] . identifier[statistics] [ identifier[firstStat] : identifier[lastStat] ]:
identifier[predictedActiveStat] = identifier[stats] [ literal[string] ][ literal[int] :]
identifier[TMRepresentationStat] = identifier[stats] [ literal[string] ][ literal[int] :]
keyword[for] identifier[numCells] , identifier[sdr] keyword[in] identifier[zip] ( identifier[predictedActiveStat] , identifier[TMRepresentationStat] ):
identifier[numStats] += literal[int]
keyword[if] identifier[numCells] keyword[in] identifier[range] ( identifier[minOverlap] , identifier[maxOverlap] + literal[int] ):
identifier[numCorrectSparsity] += literal[int]
identifier[sdr] = identifier[list] ( identifier[sdr] )
identifier[sdr] . identifier[sort] ()
identifier[dense] = identifier[numpy] . identifier[zeros] ( identifier[self] . identifier[numTMCells] )
identifier[dense] [ identifier[sdr] ]= literal[int]
( identifier[winner] , identifier[inferenceResult] , identifier[dist] , identifier[categoryDist] )= identifier[self] . identifier[classifier] . identifier[infer] ( identifier[dense] )
keyword[if] identifier[winner] == identifier[stats] [ literal[string] ]:
identifier[numCorrectClassifications] += literal[int]
keyword[if] identifier[numStats] == literal[int] :
keyword[return] literal[int] , literal[int]
keyword[return] (( identifier[numCorrectSparsity] / identifier[numStats] ),
( identifier[numCorrectClassifications] / identifier[numStats] )) | def averageSequenceAccuracy(self, minOverlap, maxOverlap, firstStat=0, lastStat=None):
"""
For each object, decide whether the TM uniquely classified it by checking
that the number of predictedActive cells are in an acceptable range.
"""
numCorrectSparsity = 0.0
numCorrectClassifications = 0.0
numStats = 0.0
# For each object or sequence we classify every point or element
#
# A sequence element is considered correctly classified only if the number
# of predictedActive cells is within a reasonable range and if the KNN
# Classifier correctly classifies the active cell representation as
# belonging to this sequence.
#
# A point on an object is considered correctly classified by the TM if the
# number of predictedActive cells is within range.
for stats in self.statistics[firstStat:lastStat]:
# Keep running total of how often the number of predictedActive cells are
# in the range. We always skip the first (unpredictable) count.
predictedActiveStat = stats['TM PredictedActive C0'][1:]
TMRepresentationStat = stats['TM Full Representation C0'][1:]
# print "\n-----------"
# print stats["object"], predictedActiveStat
for (numCells, sdr) in zip(predictedActiveStat, TMRepresentationStat):
numStats += 1.0
# print "numCells: ", numCells
if numCells in range(minOverlap, maxOverlap + 1):
numCorrectSparsity += 1.0
# Check KNN Classifier
sdr = list(sdr)
sdr.sort()
dense = numpy.zeros(self.numTMCells)
dense[sdr] = 1.0
(winner, inferenceResult, dist, categoryDist) = self.classifier.infer(dense)
# print sdr, winner, stats['object'], winner == stats['object']
# print categoryDist
# print
if winner == stats['object']:
numCorrectClassifications += 1.0 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['stats']]
if numStats == 0:
return (0.0, 0.0) # depends on [control=['if'], data=[]]
return (numCorrectSparsity / numStats, numCorrectClassifications / numStats) |
def replace_runtime_class(self, name, body, **kwargs):
        """
        replace the specified RuntimeClass
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_runtime_class(name, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str name: name of the RuntimeClass (required)
        :param V1beta1RuntimeClass body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :return: V1beta1RuntimeClass
        If the method is called asynchronously,
        returns the request thread.
        """
        # Ask the *_with_http_info delegate for only the deserialized payload,
        # not the (data, status, headers) triple -- the flag name says as much.
        kwargs['_return_http_data_only'] = True
        run_async = kwargs.get('async_req')
        if run_async:
            # Asynchronous: the delegate hands back the request thread.
            return self.replace_runtime_class_with_http_info(name, body, **kwargs)
        else:
            # Synchronous: block until the call completes, then return the data.
            data = self.replace_runtime_class_with_http_info(name, body, **kwargs)
return data | def function[replace_runtime_class, parameter[self, name, body]]:
constant[
replace the specified RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_runtime_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the RuntimeClass (required)
:param V1beta1RuntimeClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1RuntimeClass
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].replace_runtime_class_with_http_info, parameter[name[name], name[body]]]] | keyword[def] identifier[replace_runtime_class] ( identifier[self] , identifier[name] , identifier[body] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[replace_runtime_class_with_http_info] ( identifier[name] , identifier[body] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[replace_runtime_class_with_http_info] ( identifier[name] , identifier[body] ,** identifier[kwargs] )
keyword[return] identifier[data] | def replace_runtime_class(self, name, body, **kwargs):
"""
replace the specified RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_runtime_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the RuntimeClass (required)
:param V1beta1RuntimeClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1RuntimeClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_runtime_class_with_http_info(name, body, **kwargs) # depends on [control=['if'], data=[]]
else:
data = self.replace_runtime_class_with_http_info(name, body, **kwargs)
return data |
def FromBinary(cls, record_data, record_count=1):
        """Create an UpdateRecord subclass from binary record data.

        A reset-device record carries no payload, so ``record_data`` (the raw
        record bytes WITHOUT the 8 byte record header) must be empty.

        Args:
            record_data (bytearray): The raw record payload; must be empty.
            record_count (int): The number of records included in record_data.

        Raises:
            ArgumentError: If any payload bytes are present.

        Returns:
            ResetDeviceRecord: The decoded reset device record.
        """
        # Any payload at all means the record is malformed for this type.
        payload_bytes = len(record_data)
        if payload_bytes != 0:
            raise ArgumentError("Reset device record should have no included data", length=payload_bytes)
return ResetDeviceRecord() | def function[FromBinary, parameter[cls, record_data, record_count]]:
constant[Create an UpdateRecord subclass from binary record data.
This should be called with a binary record blob (NOT including the
record type header) and it will decode it into a ResetDeviceRecord.
Args:
record_data (bytearray): The raw record data that we wish to parse
into an UpdateRecord subclass NOT including its 8 byte record header.
record_count (int): The number of records included in record_data.
Raises:
ArgumentError: If the record_data is malformed and cannot be parsed.
Returns:
ResetDeviceRecord: The decoded reflash tile record.
]
if compare[call[name[len], parameter[name[record_data]]] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da18f00df90>
return[call[name[ResetDeviceRecord], parameter[]]] | keyword[def] identifier[FromBinary] ( identifier[cls] , identifier[record_data] , identifier[record_count] = literal[int] ):
literal[string]
keyword[if] identifier[len] ( identifier[record_data] )!= literal[int] :
keyword[raise] identifier[ArgumentError] ( literal[string] , identifier[length] = identifier[len] ( identifier[record_data] ))
keyword[return] identifier[ResetDeviceRecord] () | def FromBinary(cls, record_data, record_count=1):
"""Create an UpdateRecord subclass from binary record data.
This should be called with a binary record blob (NOT including the
record type header) and it will decode it into a ResetDeviceRecord.
Args:
record_data (bytearray): The raw record data that we wish to parse
into an UpdateRecord subclass NOT including its 8 byte record header.
record_count (int): The number of records included in record_data.
Raises:
ArgumentError: If the record_data is malformed and cannot be parsed.
Returns:
ResetDeviceRecord: The decoded reflash tile record.
"""
if len(record_data) != 0:
raise ArgumentError('Reset device record should have no included data', length=len(record_data)) # depends on [control=['if'], data=[]]
return ResetDeviceRecord() |
def fader(self, value: int):
        """Move the fader to a new position in the range 0 to 1023."""
        # Coerce to int; requests outside (0, 1024) -- i.e. value <= 0 or
        # value >= 1024 -- snap to position 0 rather than being clamped.
        self._fader = int(value) if 0 < value < 1024 else 0
        # The 10-bit position is split into two 7-bit halves: high bits on
        # controller 0 first...
        self.outport.send(mido.Message('control_change', control=0,
                                       value=self._fader >> 7))
        # ...then the low 7 bits on controller 32.
        self.outport.send(mido.Message('control_change', control=32,
value=self._fader & 0x7F)) | def function[fader, parameter[self, value]]:
constant[Move the fader to a new position in the range 0 to 1023.]
name[self]._fader assign[=] <ast.IfExp object at 0x7da2054a6d40>
call[name[self].outport.send, parameter[call[name[mido].Message, parameter[constant[control_change]]]]]
call[name[self].outport.send, parameter[call[name[mido].Message, parameter[constant[control_change]]]]] | keyword[def] identifier[fader] ( identifier[self] , identifier[value] : identifier[int] ):
literal[string]
identifier[self] . identifier[_fader] = identifier[int] ( identifier[value] ) keyword[if] literal[int] < identifier[value] < literal[int] keyword[else] literal[int]
identifier[self] . identifier[outport] . identifier[send] ( identifier[mido] . identifier[Message] ( literal[string] , identifier[control] = literal[int] ,
identifier[value] = identifier[self] . identifier[_fader] >> literal[int] ))
identifier[self] . identifier[outport] . identifier[send] ( identifier[mido] . identifier[Message] ( literal[string] , identifier[control] = literal[int] ,
identifier[value] = identifier[self] . identifier[_fader] & literal[int] )) | def fader(self, value: int):
"""Move the fader to a new position in the range 0 to 1023."""
self._fader = int(value) if 0 < value < 1024 else 0
self.outport.send(mido.Message('control_change', control=0, value=self._fader >> 7))
self.outport.send(mido.Message('control_change', control=32, value=self._fader & 127)) |
def _set_af_ipv4_uc_and_vrf_cmds_call_point_holder(self, v, load=False):
  """
  Setter method for af_ipv4_uc_and_vrf_cmds_call_point_holder, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/af_ipv4_uc_and_vrf_cmds_call_point_holder (container)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_af_ipv4_uc_and_vrf_cmds_call_point_holder is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_af_ipv4_uc_and_vrf_cmds_call_point_holder() directly.
  """
  # Unwrap a value that already carries a YANG dynamic type so it can be
  # re-wrapped below with this container's own schema metadata.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce/validate the value against the generated container class and its
    # YANG metadata (names, namespace, tailf extensions, callpoint).
    t = YANGDynClass(v,base=af_ipv4_uc_and_vrf_cmds_call_point_holder.af_ipv4_uc_and_vrf_cmds_call_point_holder, is_container='container', presence=False, yang_name="af-ipv4-uc-and-vrf-cmds-call-point-holder", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'AfIpv4Ucast'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
  except (TypeError, ValueError):
    # Surface a structured error describing the expected YANG type so callers
    # can see exactly which schema constraint the value failed.
    raise ValueError({
      'error-string': """af_ipv4_uc_and_vrf_cmds_call_point_holder must be of a type compatible with container""",
      'defined-type': "container",
      'generated-type': """YANGDynClass(base=af_ipv4_uc_and_vrf_cmds_call_point_holder.af_ipv4_uc_and_vrf_cmds_call_point_holder, is_container='container', presence=False, yang_name="af-ipv4-uc-and-vrf-cmds-call-point-holder", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'AfIpv4Ucast'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
    })
  # Store the validated container on the name-mangled backing attribute.
  self.__af_ipv4_uc_and_vrf_cmds_call_point_holder = t
  # Notify the parent object of the change when it supports it.
  if hasattr(self, '_set'):
self._set() | def function[_set_af_ipv4_uc_and_vrf_cmds_call_point_holder, parameter[self, v, load]]:
constant[
Setter method for af_ipv4_uc_and_vrf_cmds_call_point_holder, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/af_ipv4_uc_and_vrf_cmds_call_point_holder (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_ipv4_uc_and_vrf_cmds_call_point_holder is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_af_ipv4_uc_and_vrf_cmds_call_point_holder() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da1b26af2e0>
name[self].__af_ipv4_uc_and_vrf_cmds_call_point_holder assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_af_ipv4_uc_and_vrf_cmds_call_point_holder] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[af_ipv4_uc_and_vrf_cmds_call_point_holder] . identifier[af_ipv4_uc_and_vrf_cmds_call_point_holder] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : keyword[None] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__af_ipv4_uc_and_vrf_cmds_call_point_holder] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_af_ipv4_uc_and_vrf_cmds_call_point_holder(self, v, load=False):
"""
Setter method for af_ipv4_uc_and_vrf_cmds_call_point_holder, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/af_ipv4_uc_and_vrf_cmds_call_point_holder (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_ipv4_uc_and_vrf_cmds_call_point_holder is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_af_ipv4_uc_and_vrf_cmds_call_point_holder() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=af_ipv4_uc_and_vrf_cmds_call_point_holder.af_ipv4_uc_and_vrf_cmds_call_point_holder, is_container='container', presence=False, yang_name='af-ipv4-uc-and-vrf-cmds-call-point-holder', rest_name='', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'AfIpv4Ucast'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'af_ipv4_uc_and_vrf_cmds_call_point_holder must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=af_ipv4_uc_and_vrf_cmds_call_point_holder.af_ipv4_uc_and_vrf_cmds_call_point_holder, is_container=\'container\', presence=False, yang_name="af-ipv4-uc-and-vrf-cmds-call-point-holder", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'cli-drop-node-name\': None, u\'callpoint\': u\'AfIpv4Ucast\'}}, namespace=\'urn:brocade.com:mgmt:brocade-bgp\', defining_module=\'brocade-bgp\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__af_ipv4_uc_and_vrf_cmds_call_point_holder = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def get_now_datetime_filestamp(longTime=False):
    """
    *A datetime stamp to be appended to the end of filenames: ``YYYYMMDDtHHMMSS``*

    **Key Arguments:**
        - ``longTime`` -- also append the 6-digit microsecond field (more
          chance of filenames being unique)

    **Return:**
        - ``now`` -- current time and date in filename format

    **Usage:**

        .. code-block:: python

            from fundamentals.download import get_now_datetime_filestamp
            get_now_datetime_filestamp(longTime=False)
            #Out: '20160316t154635'
            get_now_datetime_filestamp(longTime=True)
            #Out: '20160316t154644133638'
    """
    ## > IMPORTS ##
    # Only `datetime` is needed; the previously imported `date` and `time`
    # were unused and have been dropped.
    from datetime import datetime
    # %f appends a zero-padded 6-digit microsecond field when requested.
    fmt = "%Y%m%dt%H%M%S%f" if longTime else "%Y%m%dt%H%M%S"
    now = datetime.now().strftime(fmt)
return now | def function[get_now_datetime_filestamp, parameter[longTime]]:
constant[
*A datetime stamp to be appended to the end of filenames: ``YYYYMMDDtHHMMSS``*
**Key Arguments:**
- ``longTime`` -- make time string longer (more change of filenames being unique)
**Return:**
- ``now`` -- current time and date in filename format
**Usage:**
.. code-block:: python
from fundamentals.download import get_now_datetime_filestamp
get_now_datetime_filestamp(longTime=False)
#Out: '20160316t154635'
get_now_datetime_filestamp(longTime=True)
#Out: '20160316t154644133638'
]
from relative_module[datetime] import module[datetime], module[date], module[time]
variable[now] assign[=] call[name[datetime].now, parameter[]]
if name[longTime] begin[:]
variable[now] assign[=] call[name[now].strftime, parameter[constant[%Y%m%dt%H%M%S%f]]]
return[name[now]] | keyword[def] identifier[get_now_datetime_filestamp] ( identifier[longTime] = keyword[False] ):
literal[string]
keyword[from] identifier[datetime] keyword[import] identifier[datetime] , identifier[date] , identifier[time]
identifier[now] = identifier[datetime] . identifier[now] ()
keyword[if] identifier[longTime] :
identifier[now] = identifier[now] . identifier[strftime] ( literal[string] )
keyword[else] :
identifier[now] = identifier[now] . identifier[strftime] ( literal[string] )
keyword[return] identifier[now] | def get_now_datetime_filestamp(longTime=False):
"""
*A datetime stamp to be appended to the end of filenames: ``YYYYMMDDtHHMMSS``*
**Key Arguments:**
- ``longTime`` -- make time string longer (more change of filenames being unique)
**Return:**
- ``now`` -- current time and date in filename format
**Usage:**
.. code-block:: python
from fundamentals.download import get_now_datetime_filestamp
get_now_datetime_filestamp(longTime=False)
#Out: '20160316t154635'
get_now_datetime_filestamp(longTime=True)
#Out: '20160316t154644133638'
"""
## > IMPORTS ##
from datetime import datetime, date, time
now = datetime.now()
if longTime:
now = now.strftime('%Y%m%dt%H%M%S%f') # depends on [control=['if'], data=[]]
else:
now = now.strftime('%Y%m%dt%H%M%S')
return now |
def get_response(url, plugins, timeout=SPLASH_TIMEOUT):
    """
    Return response with HAR, inline scripts and software detected by JS matchers.
    :rtype: dict
    """
    # Build the Splash /execute endpoint URL embedding the URL-encoded Lua
    # program generated for the requested plugins.
    lua = urllib.parse.quote_plus(create_lua_script(plugins))
    page_url = f'{SPLASH_URL}/execute?url={url}&timeout={timeout}&lua_source={lua}'
    try:
        # The Splash service is only available while its container is up.
        with docker_container():
            logger.debug('[+] Sending request to Splash instance')
            response = requests.get(page_url)
    except requests.exceptions.ConnectionError:
        raise SplashError("Could not connect to Splash server {}".format(SPLASH_URL))
    logger.debug('[+] Response received')
    payload = response.json()
    if response.status_code in ERROR_STATUS_CODES:
        raise SplashError(get_splash_error(payload))
    # These three names feed the result dict returned below.
    softwares = payload['softwares']
    scripts = payload['scripts'].values()
    har = get_valid_har(payload['har'])
    js_error = get_evaljs_error(payload)
    if js_error:
        logger.debug('[+] WARNING: failed to eval JS matchers: %(n)s', {'n': js_error})
    else:
        logger.debug('[+] Detected %(n)d softwares from the DOM', {'n': len(softwares)})
    logger.debug('[+] Detected %(n)d scripts from the DOM', {'n': len(scripts)})
    logger.debug('[+] Final HAR has %(n)d valid entries', {'n': len(har)})
return {'har': har, 'scripts': scripts, 'softwares': softwares} | def function[get_response, parameter[url, plugins, timeout]]:
constant[
Return response with HAR, inline scritps and software detected by JS matchers.
:rtype: dict
]
variable[lua_script] assign[=] call[name[create_lua_script], parameter[name[plugins]]]
variable[lua] assign[=] call[name[urllib].parse.quote_plus, parameter[name[lua_script]]]
variable[page_url] assign[=] <ast.JoinedStr object at 0x7da1b13a5450>
<ast.Try object at 0x7da1b13a5e40>
call[name[logger].debug, parameter[constant[[+] Response received]]]
variable[json_data] assign[=] call[name[res].json, parameter[]]
if compare[name[res].status_code in name[ERROR_STATUS_CODES]] begin[:]
<ast.Raise object at 0x7da18f00ddb0>
variable[softwares] assign[=] call[name[json_data]][constant[softwares]]
variable[scripts] assign[=] call[call[name[json_data]][constant[scripts]].values, parameter[]]
variable[har] assign[=] call[name[get_valid_har], parameter[call[name[json_data]][constant[har]]]]
variable[js_error] assign[=] call[name[get_evaljs_error], parameter[name[json_data]]]
if name[js_error] begin[:]
call[name[logger].debug, parameter[constant[[+] WARNING: failed to eval JS matchers: %(n)s], dictionary[[<ast.Constant object at 0x7da1b1231870>], [<ast.Name object at 0x7da1b1233190>]]]]
call[name[logger].debug, parameter[constant[[+] Detected %(n)d scripts from the DOM], dictionary[[<ast.Constant object at 0x7da1b13587f0>], [<ast.Call object at 0x7da1b135b520>]]]]
call[name[logger].debug, parameter[constant[[+] Final HAR has %(n)d valid entries], dictionary[[<ast.Constant object at 0x7da1b1358850>], [<ast.Call object at 0x7da1b1359720>]]]]
return[dictionary[[<ast.Constant object at 0x7da1b13593f0>, <ast.Constant object at 0x7da18f00c8b0>, <ast.Constant object at 0x7da18f00d660>], [<ast.Name object at 0x7da18f00ca90>, <ast.Name object at 0x7da18f00cca0>, <ast.Name object at 0x7da18f00e260>]]] | keyword[def] identifier[get_response] ( identifier[url] , identifier[plugins] , identifier[timeout] = identifier[SPLASH_TIMEOUT] ):
literal[string]
identifier[lua_script] = identifier[create_lua_script] ( identifier[plugins] )
identifier[lua] = identifier[urllib] . identifier[parse] . identifier[quote_plus] ( identifier[lua_script] )
identifier[page_url] = literal[string]
keyword[try] :
keyword[with] identifier[docker_container] ():
identifier[logger] . identifier[debug] ( literal[string] )
identifier[res] = identifier[requests] . identifier[get] ( identifier[page_url] )
keyword[except] identifier[requests] . identifier[exceptions] . identifier[ConnectionError] :
keyword[raise] identifier[SplashError] ( literal[string] . identifier[format] ( identifier[SPLASH_URL] ))
identifier[logger] . identifier[debug] ( literal[string] )
identifier[json_data] = identifier[res] . identifier[json] ()
keyword[if] identifier[res] . identifier[status_code] keyword[in] identifier[ERROR_STATUS_CODES] :
keyword[raise] identifier[SplashError] ( identifier[get_splash_error] ( identifier[json_data] ))
identifier[softwares] = identifier[json_data] [ literal[string] ]
identifier[scripts] = identifier[json_data] [ literal[string] ]. identifier[values] ()
identifier[har] = identifier[get_valid_har] ( identifier[json_data] [ literal[string] ])
identifier[js_error] = identifier[get_evaljs_error] ( identifier[json_data] )
keyword[if] identifier[js_error] :
identifier[logger] . identifier[debug] ( literal[string] ,{ literal[string] : identifier[js_error] })
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] ,{ literal[string] : identifier[len] ( identifier[softwares] )})
identifier[logger] . identifier[debug] ( literal[string] ,{ literal[string] : identifier[len] ( identifier[scripts] )})
identifier[logger] . identifier[debug] ( literal[string] ,{ literal[string] : identifier[len] ( identifier[har] )})
keyword[return] { literal[string] : identifier[har] , literal[string] : identifier[scripts] , literal[string] : identifier[softwares] } | def get_response(url, plugins, timeout=SPLASH_TIMEOUT):
"""
Return response with HAR, inline scritps and software detected by JS matchers.
:rtype: dict
"""
lua_script = create_lua_script(plugins)
lua = urllib.parse.quote_plus(lua_script)
page_url = f'{SPLASH_URL}/execute?url={url}&timeout={timeout}&lua_source={lua}'
try:
with docker_container():
logger.debug('[+] Sending request to Splash instance')
res = requests.get(page_url) # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except requests.exceptions.ConnectionError:
raise SplashError('Could not connect to Splash server {}'.format(SPLASH_URL)) # depends on [control=['except'], data=[]]
logger.debug('[+] Response received')
json_data = res.json()
if res.status_code in ERROR_STATUS_CODES:
raise SplashError(get_splash_error(json_data)) # depends on [control=['if'], data=[]]
softwares = json_data['softwares']
scripts = json_data['scripts'].values()
har = get_valid_har(json_data['har'])
js_error = get_evaljs_error(json_data)
if js_error:
logger.debug('[+] WARNING: failed to eval JS matchers: %(n)s', {'n': js_error}) # depends on [control=['if'], data=[]]
else:
logger.debug('[+] Detected %(n)d softwares from the DOM', {'n': len(softwares)})
logger.debug('[+] Detected %(n)d scripts from the DOM', {'n': len(scripts)})
logger.debug('[+] Final HAR has %(n)d valid entries', {'n': len(har)})
return {'har': har, 'scripts': scripts, 'softwares': softwares} |
def get_children_graph(self, item_ids=None, language=None, forbidden_item_ids=None):
        """
        Get a subgraph of items reachable from the given set of items through
        the 'child' relation.
        Args:
            item_ids (list): items which are taken as roots for the reachability
            language (str): if specified, filter out items which are not
                available in the given language
            forbidden_item_ids (iterable): item ids excluded from the result,
                both as keys and as listed children (defaults to empty set)
        Returns:
            dict: item id -> list of items (child items), root items are
            referenced by None key
        """
        if forbidden_item_ids is None:
            forbidden_item_ids = set()
        # Frontier-expansion callback for _reachable_graph: given the mapping
        # built for the previous level (item id -> child ids), return the
        # children mapping for every item referenced in it.  Called with None
        # it seeds the expansion from all active items.
        def _children(item_ids):
            if item_ids is None:
                items = Item.objects.filter(active=True).prefetch_related('children')
            else:
                # Flatten the previous level's child-id lists into one list.
                item_ids = [ii for iis in item_ids.values() for ii in iis]
                items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('children')
            # Children sorted for deterministic output; inactive and forbidden
            # items are dropped on both sides of the mapping.
            return {
                item.id: sorted([
                    _item.id for _item in item.children.all()
                    if _item.active and _item.id not in forbidden_item_ids
                ])
                for item in items if item.id not in forbidden_item_ids
            }
        if item_ids is None:
            # Full graph: delegate traversal (and the language filter) to
            # _reachable_graph.
            return self._reachable_graph(None, _children, language=language)
        else:
            # Explicit roots: build the full graph first, then cut it down to
            # what is reachable from the allowed roots.
            # NOTE(review): the language filter is only applied on the
            # full-graph branch -- presumably sufficient; confirm.
            graph = self.get_children_graph(None, language, forbidden_item_ids=forbidden_item_ids)
return self._subset_graph(graph, set(item_ids) - set(forbidden_item_ids)) | def function[get_children_graph, parameter[self, item_ids, language, forbidden_item_ids]]:
constant[
Get a subgraph of items reachable from the given set of items through
the 'child' relation.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (child items), root items are
referenced by None key
]
if compare[name[forbidden_item_ids] is constant[None]] begin[:]
variable[forbidden_item_ids] assign[=] call[name[set], parameter[]]
def function[_children, parameter[item_ids]]:
if compare[name[item_ids] is constant[None]] begin[:]
variable[items] assign[=] call[call[name[Item].objects.filter, parameter[]].prefetch_related, parameter[constant[children]]]
return[<ast.DictComp object at 0x7da20c76c040>]
if compare[name[item_ids] is constant[None]] begin[:]
return[call[name[self]._reachable_graph, parameter[constant[None], name[_children]]]] | keyword[def] identifier[get_children_graph] ( identifier[self] , identifier[item_ids] = keyword[None] , identifier[language] = keyword[None] , identifier[forbidden_item_ids] = keyword[None] ):
literal[string]
keyword[if] identifier[forbidden_item_ids] keyword[is] keyword[None] :
identifier[forbidden_item_ids] = identifier[set] ()
keyword[def] identifier[_children] ( identifier[item_ids] ):
keyword[if] identifier[item_ids] keyword[is] keyword[None] :
identifier[items] = identifier[Item] . identifier[objects] . identifier[filter] ( identifier[active] = keyword[True] ). identifier[prefetch_related] ( literal[string] )
keyword[else] :
identifier[item_ids] =[ identifier[ii] keyword[for] identifier[iis] keyword[in] identifier[item_ids] . identifier[values] () keyword[for] identifier[ii] keyword[in] identifier[iis] ]
identifier[items] = identifier[Item] . identifier[objects] . identifier[filter] ( identifier[id__in] = identifier[item_ids] , identifier[active] = keyword[True] ). identifier[prefetch_related] ( literal[string] )
keyword[return] {
identifier[item] . identifier[id] : identifier[sorted] ([
identifier[_item] . identifier[id] keyword[for] identifier[_item] keyword[in] identifier[item] . identifier[children] . identifier[all] ()
keyword[if] identifier[_item] . identifier[active] keyword[and] identifier[_item] . identifier[id] keyword[not] keyword[in] identifier[forbidden_item_ids]
])
keyword[for] identifier[item] keyword[in] identifier[items] keyword[if] identifier[item] . identifier[id] keyword[not] keyword[in] identifier[forbidden_item_ids]
}
keyword[if] identifier[item_ids] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[_reachable_graph] ( keyword[None] , identifier[_children] , identifier[language] = identifier[language] )
keyword[else] :
identifier[graph] = identifier[self] . identifier[get_children_graph] ( keyword[None] , identifier[language] , identifier[forbidden_item_ids] = identifier[forbidden_item_ids] )
keyword[return] identifier[self] . identifier[_subset_graph] ( identifier[graph] , identifier[set] ( identifier[item_ids] )- identifier[set] ( identifier[forbidden_item_ids] )) | def get_children_graph(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get a subgraph of items reachable from the given set of items through
the 'child' relation.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (child items), root items are
referenced by None key
"""
if forbidden_item_ids is None:
forbidden_item_ids = set() # depends on [control=['if'], data=['forbidden_item_ids']]
def _children(item_ids):
if item_ids is None:
items = Item.objects.filter(active=True).prefetch_related('children') # depends on [control=['if'], data=[]]
else:
item_ids = [ii for iis in item_ids.values() for ii in iis]
items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('children')
return {item.id: sorted([_item.id for _item in item.children.all() if _item.active and _item.id not in forbidden_item_ids]) for item in items if item.id not in forbidden_item_ids}
if item_ids is None:
return self._reachable_graph(None, _children, language=language) # depends on [control=['if'], data=[]]
else:
graph = self.get_children_graph(None, language, forbidden_item_ids=forbidden_item_ids)
return self._subset_graph(graph, set(item_ids) - set(forbidden_item_ids)) |
def dataset(self):
        """A Tablib Dataset containing the row."""
        # Single-row tabular view: headers mirror the row's keys, and the
        # values pass through _reduce_datetimes before being appended.
        data = tablib.Dataset()
        data.headers = self.keys()
        data.append(_reduce_datetimes(self.values()))
return data | def function[dataset, parameter[self]]:
constant[A Tablib Dataset containing the row.]
variable[data] assign[=] call[name[tablib].Dataset, parameter[]]
name[data].headers assign[=] call[name[self].keys, parameter[]]
variable[row] assign[=] call[name[_reduce_datetimes], parameter[call[name[self].values, parameter[]]]]
call[name[data].append, parameter[name[row]]]
return[name[data]] | keyword[def] identifier[dataset] ( identifier[self] ):
literal[string]
identifier[data] = identifier[tablib] . identifier[Dataset] ()
identifier[data] . identifier[headers] = identifier[self] . identifier[keys] ()
identifier[row] = identifier[_reduce_datetimes] ( identifier[self] . identifier[values] ())
identifier[data] . identifier[append] ( identifier[row] )
keyword[return] identifier[data] | def dataset(self):
"""A Tablib Dataset containing the row."""
data = tablib.Dataset()
data.headers = self.keys()
row = _reduce_datetimes(self.values())
data.append(row)
return data |
def close(self, filehandle):
        """Close openend file if no longer used.

        Decrements the reference count kept for *filehandle* in
        ``self.files`` and then evicts oldest-first handles whose count
        has dropped to zero until the cache is back within ``self.size``
        entries.

        :param filehandle: an open file object previously registered in
            ``self.files`` / ``self.past``.
        """
        with self.lock:
            if filehandle in self.files:
                self.files[filehandle] -= 1
            # trim the file cache
            # Walk self.past from the oldest entry; note that `filehandle`
            # is re-bound below, shadowing the parameter (no longer needed).
            index = 0
            size = len(self.past)
            while size > self.size and index < size:
                filehandle = self.past[index]
                if self.files[filehandle] == 0:
                    # No open references remain: really close and forget it.
                    filehandle.close()
                    del self.files[filehandle]
                    del self.past[index]
                    # The list shrank in place, so stay at the same index.
                    size -= 1
                else:
                    # Still referenced elsewhere; keep it and move on.
                    index += 1
constant[Close openend file if no longer used.]
with name[self].lock begin[:]
if compare[name[filehandle] in name[self].files] begin[:]
<ast.AugAssign object at 0x7da1b18c2c20>
variable[index] assign[=] constant[0]
variable[size] assign[=] call[name[len], parameter[name[self].past]]
while <ast.BoolOp object at 0x7da1b18c3190> begin[:]
variable[filehandle] assign[=] call[name[self].past][name[index]]
if compare[call[name[self].files][name[filehandle]] equal[==] constant[0]] begin[:]
call[name[filehandle].close, parameter[]]
<ast.Delete object at 0x7da1b18c0d90>
<ast.Delete object at 0x7da1b18c3130>
<ast.AugAssign object at 0x7da1b190e860> | keyword[def] identifier[close] ( identifier[self] , identifier[filehandle] ):
literal[string]
keyword[with] identifier[self] . identifier[lock] :
keyword[if] identifier[filehandle] keyword[in] identifier[self] . identifier[files] :
identifier[self] . identifier[files] [ identifier[filehandle] ]-= literal[int]
identifier[index] = literal[int]
identifier[size] = identifier[len] ( identifier[self] . identifier[past] )
keyword[while] identifier[size] > identifier[self] . identifier[size] keyword[and] identifier[index] < identifier[size] :
identifier[filehandle] = identifier[self] . identifier[past] [ identifier[index] ]
keyword[if] identifier[self] . identifier[files] [ identifier[filehandle] ]== literal[int] :
identifier[filehandle] . identifier[close] ()
keyword[del] identifier[self] . identifier[files] [ identifier[filehandle] ]
keyword[del] identifier[self] . identifier[past] [ identifier[index] ]
identifier[size] -= literal[int]
keyword[else] :
identifier[index] += literal[int] | def close(self, filehandle):
"""Close openend file if no longer used."""
with self.lock:
if filehandle in self.files:
self.files[filehandle] -= 1
# trim the file cache
index = 0
size = len(self.past)
while size > self.size and index < size:
filehandle = self.past[index]
if self.files[filehandle] == 0:
filehandle.close()
del self.files[filehandle]
del self.past[index]
size -= 1 # depends on [control=['if'], data=[]]
else:
index += 1 # depends on [control=['while'], data=[]] # depends on [control=['if'], data=['filehandle']] # depends on [control=['with'], data=[]] |
def _matchOther(self, obj, **kwargs):
"""Perform _match but on another object, not self."""
if obj is not None:
# Need to check that the returned UI element wasn't destroyed first:
if self._findFirstR(**kwargs):
return obj._match(**kwargs)
return False | def function[_matchOther, parameter[self, obj]]:
constant[Perform _match but on another object, not self.]
if compare[name[obj] is_not constant[None]] begin[:]
if call[name[self]._findFirstR, parameter[]] begin[:]
return[call[name[obj]._match, parameter[]]]
return[constant[False]] | keyword[def] identifier[_matchOther] ( identifier[self] , identifier[obj] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[obj] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[self] . identifier[_findFirstR] (** identifier[kwargs] ):
keyword[return] identifier[obj] . identifier[_match] (** identifier[kwargs] )
keyword[return] keyword[False] | def _matchOther(self, obj, **kwargs):
"""Perform _match but on another object, not self."""
if obj is not None:
# Need to check that the returned UI element wasn't destroyed first:
if self._findFirstR(**kwargs):
return obj._match(**kwargs) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['obj']]
return False |
def homer_stats_table_tagInfo(self):
        """Add the core HOMER tagInfo statistics to the general stats table."""
        # Nothing was parsed from tagInfo files -> nothing to report.
        if len(self.tagdir_data['header']) == 0:
            return None

        scale_to_millions = lambda x: x * 0.000001

        # Column key -> header configuration, in display order.
        column_specs = [
            ('UniqPositions', {
                'title': 'Uniq Pos',
                'description': 'Numer of Unique Di-Tags Passed Through HOMER',
                'format': '{:,.0f}',
                'modify': scale_to_millions,
                'suffix': "M",
            }),
            ('TotalPositions', {
                'title': 'Total Pos',
                'description': 'Numer of Total Di-Tags Passed Through HOMER',
                'format': '{:,.0f}',
                'modify': scale_to_millions,
                'suffix': "M",
            }),
            ('fragmentLengthEstimate', {
                'title': 'fragment Length',
                'description': 'Estimate of Fragnment Length',
                'format': '{:,.0f}',
            }),
            ('peakSizeEstimate', {
                'title': 'Peak Size',
                'description': 'Estimate of Peak Size',
                'format': '{:,.0f}',
            }),
            ('tagsPerBP', {
                'title': 'tagsPerBP',
                'description': 'average tags Per basepair',
                'format': '{:,.3f}',
            }),
            ('TagsPerPosition', {
                'title': 'averageTagsPerPosition',
                'description': 'Average Tags Per Position',
                'format': '{:,.2f}',
            }),
            ('averageTagLength', {
                'title': 'TagLength',
                'description': 'Average Tag Length',
                'format': '{:,.0f}',
            }),
            ('averageFragmentGCcontent', {
                'title': 'GCcontent',
                'description': 'Average Fragment GC content',
                'max': 1,
                'min': 0,
                'format': '{:,.2f}',
            }),
        ]
        headers = OrderedDict(column_specs)
        self.general_stats_addcols(self.tagdir_data['header'], headers, 'HOMER')
constant[ Add core HOMER stats to the general stats table from tagInfo file]
if compare[call[name[len], parameter[call[name[self].tagdir_data][constant[header]]]] equal[==] constant[0]] begin[:]
return[constant[None]]
variable[headers] assign[=] call[name[OrderedDict], parameter[]]
call[name[headers]][constant[UniqPositions]] assign[=] dictionary[[<ast.Constant object at 0x7da18bccbcd0>, <ast.Constant object at 0x7da18bcc9f60>, <ast.Constant object at 0x7da18bcc8190>, <ast.Constant object at 0x7da18bcc8b50>, <ast.Constant object at 0x7da18bccafe0>], [<ast.Constant object at 0x7da18bccba30>, <ast.Constant object at 0x7da18bcc9900>, <ast.Constant object at 0x7da18bcc88b0>, <ast.Lambda object at 0x7da18bcc9bd0>, <ast.Constant object at 0x7da204566920>]]
call[name[headers]][constant[TotalPositions]] assign[=] dictionary[[<ast.Constant object at 0x7da204567c40>, <ast.Constant object at 0x7da204564910>, <ast.Constant object at 0x7da18bcc8160>, <ast.Constant object at 0x7da18bcc8370>, <ast.Constant object at 0x7da204565660>], [<ast.Constant object at 0x7da204567a30>, <ast.Constant object at 0x7da204565990>, <ast.Constant object at 0x7da204565ae0>, <ast.Lambda object at 0x7da204566fb0>, <ast.Constant object at 0x7da204566110>]]
call[name[headers]][constant[fragmentLengthEstimate]] assign[=] dictionary[[<ast.Constant object at 0x7da204567bb0>, <ast.Constant object at 0x7da204564fd0>, <ast.Constant object at 0x7da204564af0>], [<ast.Constant object at 0x7da204566e00>, <ast.Constant object at 0x7da204565180>, <ast.Constant object at 0x7da204566800>]]
call[name[headers]][constant[peakSizeEstimate]] assign[=] dictionary[[<ast.Constant object at 0x7da204564280>, <ast.Constant object at 0x7da204567dc0>, <ast.Constant object at 0x7da204567ee0>], [<ast.Constant object at 0x7da204564220>, <ast.Constant object at 0x7da204564610>, <ast.Constant object at 0x7da204567d60>]]
call[name[headers]][constant[tagsPerBP]] assign[=] dictionary[[<ast.Constant object at 0x7da2045667a0>, <ast.Constant object at 0x7da204566b30>, <ast.Constant object at 0x7da204565240>], [<ast.Constant object at 0x7da204564400>, <ast.Constant object at 0x7da204566a70>, <ast.Constant object at 0x7da204565720>]]
call[name[headers]][constant[TagsPerPosition]] assign[=] dictionary[[<ast.Constant object at 0x7da204565270>, <ast.Constant object at 0x7da2045660e0>, <ast.Constant object at 0x7da204564ee0>], [<ast.Constant object at 0x7da2045675b0>, <ast.Constant object at 0x7da204566c50>, <ast.Constant object at 0x7da204566b00>]]
call[name[headers]][constant[averageTagLength]] assign[=] dictionary[[<ast.Constant object at 0x7da204564100>, <ast.Constant object at 0x7da2045645b0>, <ast.Constant object at 0x7da204564310>], [<ast.Constant object at 0x7da2045662f0>, <ast.Constant object at 0x7da204567eb0>, <ast.Constant object at 0x7da204564d60>]]
call[name[headers]][constant[averageFragmentGCcontent]] assign[=] dictionary[[<ast.Constant object at 0x7da204567940>, <ast.Constant object at 0x7da204564e20>, <ast.Constant object at 0x7da204565a50>, <ast.Constant object at 0x7da204567640>, <ast.Constant object at 0x7da204565000>], [<ast.Constant object at 0x7da204566500>, <ast.Constant object at 0x7da2045674c0>, <ast.Constant object at 0x7da204566b90>, <ast.Constant object at 0x7da204565090>, <ast.Constant object at 0x7da2045640d0>]]
call[name[self].general_stats_addcols, parameter[call[name[self].tagdir_data][constant[header]], name[headers], constant[HOMER]]] | keyword[def] identifier[homer_stats_table_tagInfo] ( identifier[self] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[tagdir_data] [ literal[string] ])== literal[int] :
keyword[return] keyword[None]
identifier[headers] = identifier[OrderedDict] ()
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : keyword[lambda] identifier[x] : identifier[x] * literal[int] ,
literal[string] : literal[string]
}
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : keyword[lambda] identifier[x] : identifier[x] * literal[int] ,
literal[string] : literal[string]
}
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[string]
}
identifier[self] . identifier[general_stats_addcols] ( identifier[self] . identifier[tagdir_data] [ literal[string] ], identifier[headers] , literal[string] ) | def homer_stats_table_tagInfo(self):
""" Add core HOMER stats to the general stats table from tagInfo file"""
if len(self.tagdir_data['header']) == 0:
return None # depends on [control=['if'], data=[]]
headers = OrderedDict()
headers['UniqPositions'] = {'title': 'Uniq Pos', 'description': 'Numer of Unique Di-Tags Passed Through HOMER', 'format': '{:,.0f}', 'modify': lambda x: x * 1e-06, 'suffix': 'M'}
headers['TotalPositions'] = {'title': 'Total Pos', 'description': 'Numer of Total Di-Tags Passed Through HOMER', 'format': '{:,.0f}', 'modify': lambda x: x * 1e-06, 'suffix': 'M'}
headers['fragmentLengthEstimate'] = {'title': 'fragment Length', 'description': 'Estimate of Fragnment Length', 'format': '{:,.0f}'}
headers['peakSizeEstimate'] = {'title': 'Peak Size', 'description': 'Estimate of Peak Size', 'format': '{:,.0f}'}
headers['tagsPerBP'] = {'title': 'tagsPerBP', 'description': 'average tags Per basepair', 'format': '{:,.3f}'}
headers['TagsPerPosition'] = {'title': 'averageTagsPerPosition', 'description': 'Average Tags Per Position', 'format': '{:,.2f}'}
headers['averageTagLength'] = {'title': 'TagLength', 'description': 'Average Tag Length', 'format': '{:,.0f}'}
headers['averageFragmentGCcontent'] = {'title': 'GCcontent', 'description': 'Average Fragment GC content', 'max': 1, 'min': 0, 'format': '{:,.2f}'}
self.general_stats_addcols(self.tagdir_data['header'], headers, 'HOMER') |
def get_config(self):
        """Return configurations of BoltzmannQPolicy

        # Returns
            Dict of config, extended with this policy's ``tau`` and
            ``clip`` settings.
        """
        # Start from the base-class configuration and layer our own
        # parameters on top of it.
        config = super(BoltzmannQPolicy, self).get_config()
        config.update(tau=self.tau, clip=self.clip)
        return config
constant[Return configurations of BoltzmannQPolicy
# Returns
Dict of config
]
variable[config] assign[=] call[call[name[super], parameter[name[BoltzmannQPolicy], name[self]]].get_config, parameter[]]
call[name[config]][constant[tau]] assign[=] name[self].tau
call[name[config]][constant[clip]] assign[=] name[self].clip
return[name[config]] | keyword[def] identifier[get_config] ( identifier[self] ):
literal[string]
identifier[config] = identifier[super] ( identifier[BoltzmannQPolicy] , identifier[self] ). identifier[get_config] ()
identifier[config] [ literal[string] ]= identifier[self] . identifier[tau]
identifier[config] [ literal[string] ]= identifier[self] . identifier[clip]
keyword[return] identifier[config] | def get_config(self):
"""Return configurations of BoltzmannQPolicy
# Returns
Dict of config
"""
config = super(BoltzmannQPolicy, self).get_config()
config['tau'] = self.tau
config['clip'] = self.clip
return config |
def _on_server_disconnect(self, exception):
"""Handle server disconnection."""
self._protocol = None
if self._on_disconnect_callback_func and callable(self._on_disconnect_callback_func):
self._on_disconnect_callback_func(exception)
if self._reconnect:
self._reconnect_cb() | def function[_on_server_disconnect, parameter[self, exception]]:
constant[Handle server disconnection.]
name[self]._protocol assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b02e5a20> begin[:]
call[name[self]._on_disconnect_callback_func, parameter[name[exception]]]
if name[self]._reconnect begin[:]
call[name[self]._reconnect_cb, parameter[]] | keyword[def] identifier[_on_server_disconnect] ( identifier[self] , identifier[exception] ):
literal[string]
identifier[self] . identifier[_protocol] = keyword[None]
keyword[if] identifier[self] . identifier[_on_disconnect_callback_func] keyword[and] identifier[callable] ( identifier[self] . identifier[_on_disconnect_callback_func] ):
identifier[self] . identifier[_on_disconnect_callback_func] ( identifier[exception] )
keyword[if] identifier[self] . identifier[_reconnect] :
identifier[self] . identifier[_reconnect_cb] () | def _on_server_disconnect(self, exception):
"""Handle server disconnection."""
self._protocol = None
if self._on_disconnect_callback_func and callable(self._on_disconnect_callback_func):
self._on_disconnect_callback_func(exception) # depends on [control=['if'], data=[]]
if self._reconnect:
self._reconnect_cb() # depends on [control=['if'], data=[]] |
def load(fname: str) -> 'Config':
        """
        Returns a Config object loaded from a file. The loaded object is not frozen.
        :param fname: Name of file to load the Config from.
        :return: Configuration.
        """
        # NOTE(review): yaml.load() without an explicit Loader deserializes
        # arbitrary Python objects -- that is what reconstructs the Config
        # instance here, so this must only ever be fed trusted files
        # (safe_load would not round-trip the object). Confirm call sites.
        with open(fname) as inp:
            obj = yaml.load(inp)
        # Name mangling resolves this against the enclosing class
        # (presumably Config -- confirm): re-attaches the frozen-tracking
        # attribute that is not stored in the YAML dump.
        obj.__add_frozen()
        return obj
constant[
Returns a Config object loaded from a file. The loaded object is not frozen.
:param fname: Name of file to load the Config from.
:return: Configuration.
]
with call[name[open], parameter[name[fname]]] begin[:]
variable[obj] assign[=] call[name[yaml].load, parameter[name[inp]]]
call[name[obj].__add_frozen, parameter[]]
return[name[obj]] | keyword[def] identifier[load] ( identifier[fname] : identifier[str] )-> literal[string] :
literal[string]
keyword[with] identifier[open] ( identifier[fname] ) keyword[as] identifier[inp] :
identifier[obj] = identifier[yaml] . identifier[load] ( identifier[inp] )
identifier[obj] . identifier[__add_frozen] ()
keyword[return] identifier[obj] | def load(fname: str) -> 'Config':
"""
Returns a Config object loaded from a file. The loaded object is not frozen.
:param fname: Name of file to load the Config from.
:return: Configuration.
"""
with open(fname) as inp:
obj = yaml.load(inp)
obj.__add_frozen()
return obj # depends on [control=['with'], data=['inp']] |
def validate_duration(self, field, duration):
        '''
        Validate a load-duration expression, e.g.:
        2h
        2h5m
        5m
        180
        1h4m3
        :param duration: duration string to check
        :return:
        '''
        # Optional days/hours/minutes/seconds components, in that order;
        # a bare trailing number (with or without "s") counts as seconds.
        pattern = re.compile(r'^(\d+d)?(\d+h)?(\d+m)?(\d+s?)?$')
        if pattern.match(duration) is None:
            self._error(field, 'Load duration examples: 2h30m; 5m15; 180')
constant[
2h
2h5m
5m
180
1h4m3
:param duration:
:return:
]
variable[DURATION_RE] assign[=] constant[^(\d+d)?(\d+h)?(\d+m)?(\d+s?)?$]
if <ast.UnaryOp object at 0x7da1b03a8f10> begin[:]
call[name[self]._error, parameter[name[field], constant[Load duration examples: 2h30m; 5m15; 180]]] | keyword[def] identifier[validate_duration] ( identifier[self] , identifier[field] , identifier[duration] ):
literal[string]
identifier[DURATION_RE] = literal[string]
keyword[if] keyword[not] identifier[re] . identifier[match] ( identifier[DURATION_RE] , identifier[duration] ):
identifier[self] . identifier[_error] ( identifier[field] , literal[string] ) | def validate_duration(self, field, duration):
"""
2h
2h5m
5m
180
1h4m3
:param duration:
:return:
"""
DURATION_RE = '^(\\d+d)?(\\d+h)?(\\d+m)?(\\d+s?)?$'
if not re.match(DURATION_RE, duration):
self._error(field, 'Load duration examples: 2h30m; 5m15; 180') # depends on [control=['if'], data=[]] |
def horizontal_infrared_radiation_intensity(self, value=9999.0):
        """Corresponds to IDD Field `horizontal_infrared_radiation_intensity`

        Args:
            value (float): value for IDD Field `horizontal_infrared_radiation_intensity`
                Unit: Wh/m2
                value >= 0.0
                Missing value: 9999.0
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value

        Raises:
            ValueError: if `value` is not a valid value
        """
        if value is None:
            # ``None`` marks a missing value and bypasses all validation.
            self._horizontal_infrared_radiation_intensity = None
            return
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float '
                'for field `horizontal_infrared_radiation_intensity`'.format(value))
        if value < 0.0:
            raise ValueError(
                'value need to be greater or equal 0.0 '
                'for field `horizontal_infrared_radiation_intensity`')
        self._horizontal_infrared_radiation_intensity = value
constant[Corresponds to IDD Field `horizontal_infrared_radiation_intensity`
Args:
value (float): value for IDD Field `horizontal_infrared_radiation_intensity`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
]
if compare[name[value] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b0fb0940>
if compare[name[value] less[<] constant[0.0]] begin[:]
<ast.Raise object at 0x7da1b0fb0c40>
name[self]._horizontal_infrared_radiation_intensity assign[=] name[value] | keyword[def] identifier[horizontal_infrared_radiation_intensity] ( identifier[self] , identifier[value] = literal[int] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[value] = identifier[float] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[value] ))
keyword[if] identifier[value] < literal[int] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] )
identifier[self] . identifier[_horizontal_infrared_radiation_intensity] = identifier[value] | def horizontal_infrared_radiation_intensity(self, value=9999.0):
"""Corresponds to IDD Field `horizontal_infrared_radiation_intensity`
Args:
value (float): value for IDD Field `horizontal_infrared_radiation_intensity`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('value {} need to be of type float for field `horizontal_infrared_radiation_intensity`'.format(value)) # depends on [control=['except'], data=[]]
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 for field `horizontal_infrared_radiation_intensity`') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['value']]
self._horizontal_infrared_radiation_intensity = value |
def connect(self, ip_address, tsap_snap7, tsap_logo, tcpport=102):
        """
        Connect to a Siemens LOGO server. Howto setup Logo communication configuration see: http://snap7.sourceforge.net/logo.html

        :param ip_address: IP ip_address of server
        :param tsap_snap7: TSAP SNAP7 Client (e.g. 10.00 = 0x1000)
        :param tsap_logo: TSAP Logo Server (e.g. 20.00 = 0x2000)
        :param tcpport: TCP port of the server (102 by default)
        :return: result code of the underlying ``Cli_Connect`` call
        """
        details = (ip_address, tcpport, tsap_snap7, tsap_logo)
        logger.info("connecting to %s:%s tsap_snap7 %s tsap_logo %s" % details)
        # Siemens Logo specific: the connection parameters must be set
        # first, then Cli_Connect is issued without any parameters.
        self.set_param(snap7.snap7types.RemotePort, tcpport)
        self.set_connection_params(ip_address, tsap_snap7, tsap_logo)
        result = self.library.Cli_Connect(self.pointer)
        check_error(result, context="client")
        return result
constant[
Connect to a Siemens LOGO server. Howto setup Logo communication configuration see: http://snap7.sourceforge.net/logo.html
:param ip_address: IP ip_address of server
:param tsap_snap7: TSAP SNAP7 Client (e.g. 10.00 = 0x1000)
:param tsap_logo: TSAP Logo Server (e.g. 20.00 = 0x2000)
]
call[name[logger].info, parameter[binary_operation[constant[connecting to %s:%s tsap_snap7 %s tsap_logo %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204564220>, <ast.Name object at 0x7da204567a30>, <ast.Name object at 0x7da204565720>, <ast.Name object at 0x7da204566fb0>]]]]]
call[name[self].set_param, parameter[name[snap7].snap7types.RemotePort, name[tcpport]]]
call[name[self].set_connection_params, parameter[name[ip_address], name[tsap_snap7], name[tsap_logo]]]
variable[result] assign[=] call[name[self].library.Cli_Connect, parameter[name[self].pointer]]
call[name[check_error], parameter[name[result]]]
return[name[result]] | keyword[def] identifier[connect] ( identifier[self] , identifier[ip_address] , identifier[tsap_snap7] , identifier[tsap_logo] , identifier[tcpport] = literal[int] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] %( identifier[ip_address] , identifier[tcpport] ,
identifier[tsap_snap7] , identifier[tsap_logo] ))
identifier[self] . identifier[set_param] ( identifier[snap7] . identifier[snap7types] . identifier[RemotePort] , identifier[tcpport] )
identifier[self] . identifier[set_connection_params] ( identifier[ip_address] , identifier[tsap_snap7] , identifier[tsap_logo] )
identifier[result] = identifier[self] . identifier[library] . identifier[Cli_Connect] ( identifier[self] . identifier[pointer] )
identifier[check_error] ( identifier[result] , identifier[context] = literal[string] )
keyword[return] identifier[result] | def connect(self, ip_address, tsap_snap7, tsap_logo, tcpport=102):
"""
Connect to a Siemens LOGO server. Howto setup Logo communication configuration see: http://snap7.sourceforge.net/logo.html
:param ip_address: IP ip_address of server
:param tsap_snap7: TSAP SNAP7 Client (e.g. 10.00 = 0x1000)
:param tsap_logo: TSAP Logo Server (e.g. 20.00 = 0x2000)
"""
logger.info('connecting to %s:%s tsap_snap7 %s tsap_logo %s' % (ip_address, tcpport, tsap_snap7, tsap_logo))
# special handling for Siemens Logo
# 1st set connection params
# 2nd connect without any parameters
self.set_param(snap7.snap7types.RemotePort, tcpport)
self.set_connection_params(ip_address, tsap_snap7, tsap_logo)
result = self.library.Cli_Connect(self.pointer)
check_error(result, context='client')
return result |
def receive_ack_requesting(self, pkt):
        """Handle a packet received while in the REQUESTING state.

        When *pkt* is accepted as an ACK the automaton transitions by
        raising ``self.BOUND()``; otherwise the packet is ignored.
        """
        logger.debug("C3. Received ACK?, in REQUESTING state.")
        if not self.process_received_ack(pkt):
            return
        logger.debug("C3: T. Received ACK, in REQUESTING state, "
                     "raise BOUND.")
        raise self.BOUND()
constant[Receive ACK in REQUESTING state.]
call[name[logger].debug, parameter[constant[C3. Received ACK?, in REQUESTING state.]]]
if call[name[self].process_received_ack, parameter[name[pkt]]] begin[:]
call[name[logger].debug, parameter[constant[C3: T. Received ACK, in REQUESTING state, raise BOUND.]]]
<ast.Raise object at 0x7da1b03bac50> | keyword[def] identifier[receive_ack_requesting] ( identifier[self] , identifier[pkt] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] )
keyword[if] identifier[self] . identifier[process_received_ack] ( identifier[pkt] ):
identifier[logger] . identifier[debug] ( literal[string]
literal[string] )
keyword[raise] identifier[self] . identifier[BOUND] () | def receive_ack_requesting(self, pkt):
"""Receive ACK in REQUESTING state."""
logger.debug('C3. Received ACK?, in REQUESTING state.')
if self.process_received_ack(pkt):
logger.debug('C3: T. Received ACK, in REQUESTING state, raise BOUND.')
raise self.BOUND() # depends on [control=['if'], data=[]] |
def from_data( name, coors, ngroups, conns, mat_ids, descs, igs = None ):
        """
        Create a mesh from mesh data.

        When `igs` is None all element groups are used, otherwise only the
        groups whose indices are listed in `igs` are copied into the mesh.
        """
        if igs is None:
            igs = range(len(conns))
        # Restrict the per-group sequences to the selected group indices.
        selected_conns = [conns[ig] for ig in igs]
        selected_mat_ids = [mat_ids[ig] for ig in igs]
        selected_descs = [descs[ig] for ig in igs]

        mesh = Mesh(name)
        mesh._set_data(coors=coors,
                       ngroups=ngroups,
                       conns=selected_conns,
                       mat_ids=selected_mat_ids,
                       descs=selected_descs)
        mesh._set_shape_info()
        return mesh
constant[
Create a mesh from mesh data.
]
if compare[name[igs] is constant[None]] begin[:]
variable[igs] assign[=] call[name[range], parameter[call[name[len], parameter[name[conns]]]]]
variable[mesh] assign[=] call[name[Mesh], parameter[name[name]]]
call[name[mesh]._set_data, parameter[]]
call[name[mesh]._set_shape_info, parameter[]]
return[name[mesh]] | keyword[def] identifier[from_data] ( identifier[name] , identifier[coors] , identifier[ngroups] , identifier[conns] , identifier[mat_ids] , identifier[descs] , identifier[igs] = keyword[None] ):
literal[string]
keyword[if] identifier[igs] keyword[is] keyword[None] :
identifier[igs] = identifier[range] ( identifier[len] ( identifier[conns] ))
identifier[mesh] = identifier[Mesh] ( identifier[name] )
identifier[mesh] . identifier[_set_data] ( identifier[coors] = identifier[coors] ,
identifier[ngroups] = identifier[ngroups] ,
identifier[conns] =[ identifier[conns] [ identifier[ig] ] keyword[for] identifier[ig] keyword[in] identifier[igs] ],
identifier[mat_ids] =[ identifier[mat_ids] [ identifier[ig] ] keyword[for] identifier[ig] keyword[in] identifier[igs] ],
identifier[descs] =[ identifier[descs] [ identifier[ig] ] keyword[for] identifier[ig] keyword[in] identifier[igs] ])
identifier[mesh] . identifier[_set_shape_info] ()
keyword[return] identifier[mesh] | def from_data(name, coors, ngroups, conns, mat_ids, descs, igs=None):
"""
Create a mesh from mesh data.
"""
if igs is None:
igs = range(len(conns)) # depends on [control=['if'], data=['igs']]
mesh = Mesh(name)
mesh._set_data(coors=coors, ngroups=ngroups, conns=[conns[ig] for ig in igs], mat_ids=[mat_ids[ig] for ig in igs], descs=[descs[ig] for ig in igs])
mesh._set_shape_info()
return mesh |
def run_command_under_r_root(self, cmd, catched=True):
        """Run *cmd* as a subprocess from inside the R project root.

        :param cmd: command handed straight through to ``sp.run``.
        :param catched: when True, capture stdout/stderr into pipes
            instead of inheriting the parent's streams.
        :return: the process object returned by ``sp.run``.
        """
        run_kwargs = {}
        if catched:
            # Capture both output streams on the returned process object.
            run_kwargs = dict(stdout=sp.PIPE, stderr=sp.PIPE)
        with self.cd(newdir=self.path):
            return sp.run(cmd, **run_kwargs)
constant[
subprocess run on here
]
variable[RPATH] assign[=] name[self].path
with call[name[self].cd, parameter[]] begin[:]
if name[catched] begin[:]
variable[process] assign[=] call[name[sp].run, parameter[name[cmd]]]
return[name[process]] | keyword[def] identifier[run_command_under_r_root] ( identifier[self] , identifier[cmd] , identifier[catched] = keyword[True] ):
literal[string]
identifier[RPATH] = identifier[self] . identifier[path]
keyword[with] identifier[self] . identifier[cd] ( identifier[newdir] = identifier[RPATH] ):
keyword[if] identifier[catched] :
identifier[process] = identifier[sp] . identifier[run] ( identifier[cmd] , identifier[stdout] = identifier[sp] . identifier[PIPE] , identifier[stderr] = identifier[sp] . identifier[PIPE] )
keyword[else] :
identifier[process] = identifier[sp] . identifier[run] ( identifier[cmd] )
keyword[return] identifier[process] | def run_command_under_r_root(self, cmd, catched=True):
"""
subprocess run on here
"""
RPATH = self.path
with self.cd(newdir=RPATH):
if catched:
process = sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE) # depends on [control=['if'], data=[]]
else:
process = sp.run(cmd)
return process # depends on [control=['with'], data=[]] |
def browse(self, cat=None, subCat=None):
        """Browse categories. If neither cat nor subcat are specified,
        return a list of categories, otherwise it return a list of apps
        using cat (category ID) and subCat (subcategory ID) as filters."""
        # Assemble the query string piecewise; both filters are optional.
        query_parts = ["?c=3"]
        if cat is not None:
            query_parts.append("&cat={}".format(requests.utils.quote(cat)))
        if subCat is not None:
            query_parts.append("&ctr={}".format(requests.utils.quote(subCat)))
        path = BROWSE_URL + "".join(query_parts)
        data = self.executeRequestApi2(path)
        return utils.parseProtobufObj(data.payload.browseResponse)
constant[Browse categories. If neither cat nor subcat are specified,
return a list of categories, otherwise it return a list of apps
using cat (category ID) and subCat (subcategory ID) as filters.]
variable[path] assign[=] binary_operation[name[BROWSE_URL] + constant[?c=3]]
if compare[name[cat] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da18fe92980>
if compare[name[subCat] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da18fe90730>
variable[data] assign[=] call[name[self].executeRequestApi2, parameter[name[path]]]
return[call[name[utils].parseProtobufObj, parameter[name[data].payload.browseResponse]]] | keyword[def] identifier[browse] ( identifier[self] , identifier[cat] = keyword[None] , identifier[subCat] = keyword[None] ):
literal[string]
identifier[path] = identifier[BROWSE_URL] + literal[string]
keyword[if] identifier[cat] keyword[is] keyword[not] keyword[None] :
identifier[path] += literal[string] . identifier[format] ( identifier[requests] . identifier[utils] . identifier[quote] ( identifier[cat] ))
keyword[if] identifier[subCat] keyword[is] keyword[not] keyword[None] :
identifier[path] += literal[string] . identifier[format] ( identifier[requests] . identifier[utils] . identifier[quote] ( identifier[subCat] ))
identifier[data] = identifier[self] . identifier[executeRequestApi2] ( identifier[path] )
keyword[return] identifier[utils] . identifier[parseProtobufObj] ( identifier[data] . identifier[payload] . identifier[browseResponse] ) | def browse(self, cat=None, subCat=None):
"""Browse categories. If neither cat nor subcat are specified,
return a list of categories, otherwise it return a list of apps
using cat (category ID) and subCat (subcategory ID) as filters."""
path = BROWSE_URL + '?c=3'
if cat is not None:
path += '&cat={}'.format(requests.utils.quote(cat)) # depends on [control=['if'], data=['cat']]
if subCat is not None:
path += '&ctr={}'.format(requests.utils.quote(subCat)) # depends on [control=['if'], data=['subCat']]
data = self.executeRequestApi2(path)
return utils.parseProtobufObj(data.payload.browseResponse) |
def wrap_function(func):
    """
    RETURN A THREE-PARAMETER WINDOW FUNCTION TO MATCH

    Accepts either an expression string (compiled via
    ``compile_expression``) or a callable taking 0..3 positional
    arguments; the callable is adapted to the uniform
    ``(row, rownum, rows)`` window signature.

    :param func: expression string or callable with at most 3 arguments
    :return: a callable with signature ``(row, rownum, rows)``
    :raises ValueError: if ``func`` declares more than three arguments
        (previously this fell through and silently returned ``None``,
        which made the caller fail later with a confusing TypeError).
    """
    if is_text(func):
        return compile_expression(func)

    # co_argcount counts the function's declared positional parameters.
    numarg = func.__code__.co_argcount
    if numarg == 0:
        def temp(row, rownum, rows):
            return func()
        return temp
    elif numarg == 1:
        def temp(row, rownum, rows):
            return func(row)
        return temp
    elif numarg == 2:
        def temp(row, rownum, rows):
            return func(row, rownum)
        return temp
    elif numarg == 3:
        return func
    else:
        raise ValueError(
            "window function must accept at most three arguments "
            "(row, rownum, rows); got {}".format(numarg)
        )
constant[
RETURN A THREE-PARAMETER WINDOW FUNCTION TO MATCH
]
if call[name[is_text], parameter[name[func]]] begin[:]
return[call[name[compile_expression], parameter[name[func]]]]
variable[numarg] assign[=] name[func].__code__.co_argcount
if compare[name[numarg] equal[==] constant[0]] begin[:]
def function[temp, parameter[row, rownum, rows]]:
return[call[name[func], parameter[]]]
return[name[temp]] | keyword[def] identifier[wrap_function] ( identifier[func] ):
literal[string]
keyword[if] identifier[is_text] ( identifier[func] ):
keyword[return] identifier[compile_expression] ( identifier[func] )
identifier[numarg] = identifier[func] . identifier[__code__] . identifier[co_argcount]
keyword[if] identifier[numarg] == literal[int] :
keyword[def] identifier[temp] ( identifier[row] , identifier[rownum] , identifier[rows] ):
keyword[return] identifier[func] ()
keyword[return] identifier[temp]
keyword[elif] identifier[numarg] == literal[int] :
keyword[def] identifier[temp] ( identifier[row] , identifier[rownum] , identifier[rows] ):
keyword[return] identifier[func] ( identifier[row] )
keyword[return] identifier[temp]
keyword[elif] identifier[numarg] == literal[int] :
keyword[def] identifier[temp] ( identifier[row] , identifier[rownum] , identifier[rows] ):
keyword[return] identifier[func] ( identifier[row] , identifier[rownum] )
keyword[return] identifier[temp]
keyword[elif] identifier[numarg] == literal[int] :
keyword[return] identifier[func] | def wrap_function(func):
"""
RETURN A THREE-PARAMETER WINDOW FUNCTION TO MATCH
"""
if is_text(func):
return compile_expression(func) # depends on [control=['if'], data=[]]
numarg = func.__code__.co_argcount
if numarg == 0:
def temp(row, rownum, rows):
return func()
return temp # depends on [control=['if'], data=[]]
elif numarg == 1:
def temp(row, rownum, rows):
return func(row)
return temp # depends on [control=['if'], data=[]]
elif numarg == 2:
def temp(row, rownum, rows):
return func(row, rownum)
return temp # depends on [control=['if'], data=[]]
elif numarg == 3:
return func # depends on [control=['if'], data=[]] |
def seconds_to_DHMS(seconds, as_str=True):
    """converts seconds to Days, Hours, Minutes, Seconds
    :param int seconds: number of seconds
    :param bool as_str: to return a formated string defaults to True
    :returns: a formated string if as_str else a dictionary
    :Example:
    >>> seconds_to_DHMS(60*60*24)
    001-00:00:00
    >>> seconds_to_DHMS(60*60*24, False)
    {'hours': 0, 'seconds': 0, 'minutes': 0, 'days': 1}
    """
    # Peel off each unit with divmod instead of independent // and %.
    remainder, secs = divmod(seconds, 60)
    remainder, mins = divmod(remainder, 60)
    days, hours = divmod(remainder, 24)
    d = DotDot()
    d.days = int(days)
    d.hours = int(hours)
    d.minutes = int(mins)
    d.seconds = int(secs)
    return FMT_DHMS_DICT.format(**d) if as_str else d
constant[converts seconds to Days, Hours, Minutes, Seconds
:param int seconds: number of seconds
:param bool as_string: to return a formated string defaults to True
:returns: a formated string if as_str else a dictionary
:Example:
>>> seconds_to_DHMS(60*60*24)
001-00:00:00
>>> seconds_to_DHMS(60*60*24, False)
{'hours': 0, 'seconds': 0, 'minutes': 0, 'days': 1}
]
variable[d] assign[=] call[name[DotDot], parameter[]]
name[d].days assign[=] call[name[int], parameter[binary_operation[name[seconds] <ast.FloorDiv object at 0x7da2590d6bc0> binary_operation[constant[3600] * constant[24]]]]]
name[d].hours assign[=] call[name[int], parameter[binary_operation[binary_operation[name[seconds] <ast.FloorDiv object at 0x7da2590d6bc0> constant[3600]] <ast.Mod object at 0x7da2590d6920> constant[24]]]]
name[d].minutes assign[=] call[name[int], parameter[binary_operation[binary_operation[name[seconds] <ast.FloorDiv object at 0x7da2590d6bc0> constant[60]] <ast.Mod object at 0x7da2590d6920> constant[60]]]]
name[d].seconds assign[=] call[name[int], parameter[binary_operation[name[seconds] <ast.Mod object at 0x7da2590d6920> constant[60]]]]
return[<ast.IfExp object at 0x7da20c7cb2b0>] | keyword[def] identifier[seconds_to_DHMS] ( identifier[seconds] , identifier[as_str] = keyword[True] ):
literal[string]
identifier[d] = identifier[DotDot] ()
identifier[d] . identifier[days] = identifier[int] ( identifier[seconds] //( literal[int] * literal[int] ))
identifier[d] . identifier[hours] = identifier[int] (( identifier[seconds] // literal[int] )% literal[int] )
identifier[d] . identifier[minutes] = identifier[int] (( identifier[seconds] // literal[int] )% literal[int] )
identifier[d] . identifier[seconds] = identifier[int] ( identifier[seconds] % literal[int] )
keyword[return] identifier[FMT_DHMS_DICT] . identifier[format] (** identifier[d] ) keyword[if] identifier[as_str] keyword[else] identifier[d] | def seconds_to_DHMS(seconds, as_str=True):
"""converts seconds to Days, Hours, Minutes, Seconds
:param int seconds: number of seconds
:param bool as_string: to return a formated string defaults to True
:returns: a formated string if as_str else a dictionary
:Example:
>>> seconds_to_DHMS(60*60*24)
001-00:00:00
>>> seconds_to_DHMS(60*60*24, False)
{'hours': 0, 'seconds': 0, 'minutes': 0, 'days': 1}
"""
d = DotDot()
d.days = int(seconds // (3600 * 24))
d.hours = int(seconds // 3600 % 24)
d.minutes = int(seconds // 60 % 60)
d.seconds = int(seconds % 60)
return FMT_DHMS_DICT.format(**d) if as_str else d |
def evaluate_binop_logical(self, operation, left, right, **kwargs):
    """
    Evaluate given logical binary operation with given operands.

    :param str operation: key identifying the operation; must be present
        in ``self.binops_logical``
    :param left: left-hand operand
    :param right: right-hand operand
    :param kwargs: ignored; accepted for interface compatibility
    :return: result of the operation coerced to ``bool``
    :raises ValueError: if the operation is not a known logical operation
    """
    # Idiom fix: use ``x not in y`` rather than ``not x in y``.
    if operation not in self.binops_logical:
        raise ValueError("Invalid logical binary operation '{}'".format(operation))
    result = self.binops_logical[operation](left, right)
    return bool(result)
return bool(result) | def function[evaluate_binop_logical, parameter[self, operation, left, right]]:
constant[
Evaluate given logical binary operation with given operands.
]
if <ast.UnaryOp object at 0x7da20c6aa350> begin[:]
<ast.Raise object at 0x7da20c6a9bd0>
variable[result] assign[=] call[call[name[self].binops_logical][name[operation]], parameter[name[left], name[right]]]
return[call[name[bool], parameter[name[result]]]] | keyword[def] identifier[evaluate_binop_logical] ( identifier[self] , identifier[operation] , identifier[left] , identifier[right] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[operation] keyword[in] identifier[self] . identifier[binops_logical] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[operation] ))
identifier[result] = identifier[self] . identifier[binops_logical] [ identifier[operation] ]( identifier[left] , identifier[right] )
keyword[return] identifier[bool] ( identifier[result] ) | def evaluate_binop_logical(self, operation, left, right, **kwargs):
"""
Evaluate given logical binary operation with given operands.
"""
if not operation in self.binops_logical:
raise ValueError("Invalid logical binary operation '{}'".format(operation)) # depends on [control=['if'], data=[]]
result = self.binops_logical[operation](left, right)
return bool(result) |
def define_laser_variables(Nl, real_amplitudes=False, variables=None):
    r"""Return the amplitudes and frequencies of Nl fields.
    >>> E0, omega_laser = define_laser_variables(2)
    >>> E0, omega_laser
    ([E_0^1, E_0^2], [varpi_1, varpi_2])
    The amplitudes are complex by default:
    >>> conjugate(E0[0])
    conjugate(E_0^1)
    But they can optionally be made real:
    >>> E0, omega_laser = define_laser_variables(2, real_amplitudes=True)
    >>> conjugate(E0[0])
    E_0^1
    They can also be made explicit functions of given variables:
    >>> from sympy import symbols
    >>> t, z = symbols("t, z", real=True)
    >>> E0, omega_laser = define_laser_variables(2, variables=[t, z])
    >>> E0
    [E_0^1(t, z), E_0^2(t, z)]
    """
    # 1-based labels shared by amplitudes and frequencies.
    labels = [str(l + 1) for l in range(Nl)]
    if variables is None:
        E0 = [Symbol(r"E_0^" + lab, real=real_amplitudes) for lab in labels]
    else:
        # Amplitudes become explicit functions of the given variables.
        E0 = [Function(r"E_0^" + lab, real=real_amplitudes)(*variables)
              for lab in labels]
    omega_laser = [Symbol(r"varpi_" + lab, positive=True) for lab in labels]
    return E0, omega_laser
constant[Return the amplitudes and frequencies of Nl fields.
>>> E0, omega_laser = define_laser_variables(2)
>>> E0, omega_laser
([E_0^1, E_0^2], [varpi_1, varpi_2])
The amplitudes are complex by default:
>>> conjugate(E0[0])
conjugate(E_0^1)
But they can optionally be made real:
>>> E0, omega_laser = define_laser_variables(2, real_amplitudes=True)
>>> conjugate(E0[0])
E_0^1
They can also be made explicit functions of given variables:
>>> from sympy import symbols
>>> t, z = symbols("t, z", real=True)
>>> E0, omega_laser = define_laser_variables(2, variables=[t, z])
>>> E0
[E_0^1(t, z), E_0^2(t, z)]
]
if compare[name[variables] is constant[None]] begin[:]
variable[E0] assign[=] <ast.ListComp object at 0x7da1b196b9a0>
variable[omega_laser] assign[=] <ast.ListComp object at 0x7da1b19a1ae0>
return[tuple[[<ast.Name object at 0x7da1b19a1ea0>, <ast.Name object at 0x7da1b19a1ed0>]]] | keyword[def] identifier[define_laser_variables] ( identifier[Nl] , identifier[real_amplitudes] = keyword[False] , identifier[variables] = keyword[None] ):
literal[string]
keyword[if] identifier[variables] keyword[is] keyword[None] :
identifier[E0] =[ identifier[Symbol] ( literal[string] + identifier[str] ( identifier[l] + literal[int] ), identifier[real] = identifier[real_amplitudes] )
keyword[for] identifier[l] keyword[in] identifier[range] ( identifier[Nl] )]
keyword[else] :
identifier[E0] =[ identifier[Function] ( literal[string] + identifier[str] ( identifier[l] + literal[int] ), identifier[real] = identifier[real_amplitudes] )(* identifier[variables] )
keyword[for] identifier[l] keyword[in] identifier[range] ( identifier[Nl] )]
identifier[omega_laser] =[ identifier[Symbol] ( literal[string] + identifier[str] ( identifier[l] + literal[int] ), identifier[positive] = keyword[True] )
keyword[for] identifier[l] keyword[in] identifier[range] ( identifier[Nl] )]
keyword[return] identifier[E0] , identifier[omega_laser] | def define_laser_variables(Nl, real_amplitudes=False, variables=None):
"""Return the amplitudes and frequencies of Nl fields.
>>> E0, omega_laser = define_laser_variables(2)
>>> E0, omega_laser
([E_0^1, E_0^2], [varpi_1, varpi_2])
The amplitudes are complex by default:
>>> conjugate(E0[0])
conjugate(E_0^1)
But they can optionally be made real:
>>> E0, omega_laser = define_laser_variables(2, real_amplitudes=True)
>>> conjugate(E0[0])
E_0^1
They can also be made explicit functions of given variables:
>>> from sympy import symbols
>>> t, z = symbols("t, z", real=True)
>>> E0, omega_laser = define_laser_variables(2, variables=[t, z])
>>> E0
[E_0^1(t, z), E_0^2(t, z)]
"""
if variables is None:
E0 = [Symbol('E_0^' + str(l + 1), real=real_amplitudes) for l in range(Nl)] # depends on [control=['if'], data=[]]
else:
E0 = [Function('E_0^' + str(l + 1), real=real_amplitudes)(*variables) for l in range(Nl)]
omega_laser = [Symbol('varpi_' + str(l + 1), positive=True) for l in range(Nl)]
return (E0, omega_laser) |
def squad_v2_exact_match(y_true: List[List[str]], y_predicted: List[str]) -> float:
    """ Calculates Exact Match score between y_true and y_predicted
    EM score uses the best matching y_true answer:
    if y_pred equal at least to one answer in y_true then EM = 1, else EM = 0
    The same as in SQuAD-v2.0
    Args:
        y_true: list of correct answers (correct answers are represented by list of strings)
        y_predicted: list of predicted answers
    Returns:
        exact match score : float
    """
    if not y_true:
        return 0
    matches = 0
    for ground_truth, prediction in zip(y_true, y_predicted):
        # A prediction counts if it matches any of the gold answers
        # after normalization.
        if normalize_answer(prediction) in map(normalize_answer, ground_truth):
            matches += 1
    return 100 * matches / len(y_true)
constant[ Calculates Exact Match score between y_true and y_predicted
EM score uses the best matching y_true answer:
if y_pred equal at least to one answer in y_true then EM = 1, else EM = 0
The same as in SQuAD-v2.0
Args:
y_true: list of correct answers (correct answers are represented by list of strings)
y_predicted: list of predicted answers
Returns:
exact match score : float
]
variable[EM_total] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da204347e80>]]
return[<ast.IfExp object at 0x7da204347d30>] | keyword[def] identifier[squad_v2_exact_match] ( identifier[y_true] : identifier[List] [ identifier[List] [ identifier[str] ]], identifier[y_predicted] : identifier[List] [ identifier[str] ])-> identifier[float] :
literal[string]
identifier[EM_total] = identifier[sum] ( identifier[normalize_answer] ( identifier[prediction] ) keyword[in] identifier[map] ( identifier[normalize_answer] , identifier[ground_truth] )
keyword[for] identifier[ground_truth] , identifier[prediction] keyword[in] identifier[zip] ( identifier[y_true] , identifier[y_predicted] ))
keyword[return] literal[int] * identifier[EM_total] / identifier[len] ( identifier[y_true] ) keyword[if] identifier[len] ( identifier[y_true] )> literal[int] keyword[else] literal[int] | def squad_v2_exact_match(y_true: List[List[str]], y_predicted: List[str]) -> float:
""" Calculates Exact Match score between y_true and y_predicted
EM score uses the best matching y_true answer:
if y_pred equal at least to one answer in y_true then EM = 1, else EM = 0
The same as in SQuAD-v2.0
Args:
y_true: list of correct answers (correct answers are represented by list of strings)
y_predicted: list of predicted answers
Returns:
exact match score : float
"""
EM_total = sum((normalize_answer(prediction) in map(normalize_answer, ground_truth) for (ground_truth, prediction) in zip(y_true, y_predicted)))
return 100 * EM_total / len(y_true) if len(y_true) > 0 else 0 |
def run_segment_operation(outdoc, filenames, segments, use_segment_table, operation, result_name = 'RESULT', preserve = True):
    """
    Performs an operation (intersect, union or diff) across a set of segments.
    That is, given a set of files each with segment definers DMT-FLAG1,
    DMT-FLAG2 etc and a list of segments DMT-FLAG1,DMT-FLAG1 this returns
    RESULT = (table 1's DMT-FLAG1 union table 2's DMT-FLAG1 union ...)
             operation
             (table 1's DMT-FLAG2 union table 2's DMT-FLAG2 union ...)
             operation
             etc

    :param outdoc: LIGO_LW document to which the result is written; its
        first process table row supplies the process_id for new entries.
    :param filenames: XML files to load the input segment tables from.
    :param segments: comma-separated list of segment definer names.
    :param use_segment_table: if true, read/write the segment table,
        otherwise the segment_summary table.
    :param operation: one of INTERSECT, UNION or DIFF.
    :param result_name: name for the resulting segment definer.
    :param preserve: if true, merge the input files into outdoc itself;
        otherwise load them into a scratch document.
    :returns: (outdoc, total livetime of the resulting segment list)
    :raises NameError: if operation is not one of the known operations.
    """
    proc_id = table.get_table(outdoc, lsctables.ProcessTable.tableName)[0].process_id
    if preserve:
        indoc = ligolw_add.ligolw_add(outdoc, filenames)
    else:
        indoc = ligolw_add.ligolw_add(ligolw.Document(), filenames)
    # Start with a segment covering all of time, then
    # intersect with each of the fields of interest
    keys = segments.split(',')
    if operation == INTERSECT:
        sgmntlist = pycbc_glue.segments.segmentlist([pycbc_glue.segments.segment(-pycbc_glue.segments.infinity(), pycbc_glue.segments.infinity())])
        for key in keys:
            sgmntlist &= find_segments(indoc, key, use_segment_table)
    elif operation == UNION:
        # Union starts from the empty list and accumulates each definer.
        sgmntlist = pycbc_glue.segments.segmentlist([])
        for key in keys:
            sgmntlist |= find_segments(indoc, key, use_segment_table)
    elif operation == DIFF:
        # Left fold: first definer minus each of the remaining ones.
        sgmntlist = find_segments(indoc, keys[0], use_segment_table)
        for key in keys[1:]:
            sgmntlist -= find_segments(indoc, key, use_segment_table)
    else:
        raise NameError("%s is not a known operation (intersect, union or diff)" % operation)
    # Add a segment definer and segments
    seg_def_id = add_to_segment_definer(outdoc, proc_id, '', result_name, 1)
    if use_segment_table:
        add_to_segment(outdoc, proc_id, seg_def_id, sgmntlist)
    else:
        add_to_segment_summary(outdoc, proc_id, seg_def_id, sgmntlist)
    # abs() of a segmentlist is its total livetime.
    return outdoc, abs(sgmntlist)
constant[
Performs an operation (intersect or union) across a set of segments.
That is, given a set of files each with segment definers DMT-FLAG1,
DMT-FLAG2 etc and a list of segments DMT-FLAG1,DMT-FLAG1 this returns
RESULT = (table 1's DMT-FLAG1 union table 2's DMT-FLAG1 union ...)
operation
(table 1's DMT-FLAG2 union table 2's DMT-FLAG2 union ...)
operation
etc
]
variable[proc_id] assign[=] call[call[name[table].get_table, parameter[name[outdoc], name[lsctables].ProcessTable.tableName]]][constant[0]].process_id
if name[preserve] begin[:]
variable[indoc] assign[=] call[name[ligolw_add].ligolw_add, parameter[name[outdoc], name[filenames]]]
variable[keys] assign[=] call[name[segments].split, parameter[constant[,]]]
if compare[name[operation] equal[==] name[INTERSECT]] begin[:]
variable[sgmntlist] assign[=] call[name[pycbc_glue].segments.segmentlist, parameter[list[[<ast.Call object at 0x7da1b0b72a10>]]]]
for taget[name[key]] in starred[name[keys]] begin[:]
<ast.AugAssign object at 0x7da1b0b724a0>
variable[seg_def_id] assign[=] call[name[add_to_segment_definer], parameter[name[outdoc], name[proc_id], constant[], name[result_name], constant[1]]]
if name[use_segment_table] begin[:]
call[name[add_to_segment], parameter[name[outdoc], name[proc_id], name[seg_def_id], name[sgmntlist]]]
return[tuple[[<ast.Name object at 0x7da1b0b720b0>, <ast.Call object at 0x7da1b0b70940>]]] | keyword[def] identifier[run_segment_operation] ( identifier[outdoc] , identifier[filenames] , identifier[segments] , identifier[use_segment_table] , identifier[operation] , identifier[result_name] = literal[string] , identifier[preserve] = keyword[True] ):
literal[string]
identifier[proc_id] = identifier[table] . identifier[get_table] ( identifier[outdoc] , identifier[lsctables] . identifier[ProcessTable] . identifier[tableName] )[ literal[int] ]. identifier[process_id]
keyword[if] identifier[preserve] :
identifier[indoc] = identifier[ligolw_add] . identifier[ligolw_add] ( identifier[outdoc] , identifier[filenames] )
keyword[else] :
identifier[indoc] = identifier[ligolw_add] . identifier[ligolw_add] ( identifier[ligolw] . identifier[Document] (), identifier[filenames] )
identifier[keys] = identifier[segments] . identifier[split] ( literal[string] )
keyword[if] identifier[operation] == identifier[INTERSECT] :
identifier[sgmntlist] = identifier[pycbc_glue] . identifier[segments] . identifier[segmentlist] ([ identifier[pycbc_glue] . identifier[segments] . identifier[segment] (- identifier[pycbc_glue] . identifier[segments] . identifier[infinity] (), identifier[pycbc_glue] . identifier[segments] . identifier[infinity] ())])
keyword[for] identifier[key] keyword[in] identifier[keys] :
identifier[sgmntlist] &= identifier[find_segments] ( identifier[indoc] , identifier[key] , identifier[use_segment_table] )
keyword[elif] identifier[operation] == identifier[UNION] :
identifier[sgmntlist] = identifier[pycbc_glue] . identifier[segments] . identifier[segmentlist] ([])
keyword[for] identifier[key] keyword[in] identifier[keys] :
identifier[sgmntlist] |= identifier[find_segments] ( identifier[indoc] , identifier[key] , identifier[use_segment_table] )
keyword[elif] identifier[operation] == identifier[DIFF] :
identifier[sgmntlist] = identifier[find_segments] ( identifier[indoc] , identifier[keys] [ literal[int] ], identifier[use_segment_table] )
keyword[for] identifier[key] keyword[in] identifier[keys] [ literal[int] :]:
identifier[sgmntlist] -= identifier[find_segments] ( identifier[indoc] , identifier[key] , identifier[use_segment_table] )
keyword[else] :
keyword[raise] identifier[NameError] ( literal[string] % identifier[operation] )
identifier[seg_def_id] = identifier[add_to_segment_definer] ( identifier[outdoc] , identifier[proc_id] , literal[string] , identifier[result_name] , literal[int] )
keyword[if] identifier[use_segment_table] :
identifier[add_to_segment] ( identifier[outdoc] , identifier[proc_id] , identifier[seg_def_id] , identifier[sgmntlist] )
keyword[else] :
identifier[add_to_segment_summary] ( identifier[outdoc] , identifier[proc_id] , identifier[seg_def_id] , identifier[sgmntlist] )
keyword[return] identifier[outdoc] , identifier[abs] ( identifier[sgmntlist] ) | def run_segment_operation(outdoc, filenames, segments, use_segment_table, operation, result_name='RESULT', preserve=True):
"""
Performs an operation (intersect or union) across a set of segments.
That is, given a set of files each with segment definers DMT-FLAG1,
DMT-FLAG2 etc and a list of segments DMT-FLAG1,DMT-FLAG1 this returns
RESULT = (table 1's DMT-FLAG1 union table 2's DMT-FLAG1 union ...)
operation
(table 1's DMT-FLAG2 union table 2's DMT-FLAG2 union ...)
operation
etc
"""
proc_id = table.get_table(outdoc, lsctables.ProcessTable.tableName)[0].process_id
if preserve:
indoc = ligolw_add.ligolw_add(outdoc, filenames) # depends on [control=['if'], data=[]]
else:
indoc = ligolw_add.ligolw_add(ligolw.Document(), filenames)
# Start with a segment covering all of time, then
# intersect with each of the fields of interest
keys = segments.split(',')
if operation == INTERSECT:
sgmntlist = pycbc_glue.segments.segmentlist([pycbc_glue.segments.segment(-pycbc_glue.segments.infinity(), pycbc_glue.segments.infinity())])
for key in keys:
sgmntlist &= find_segments(indoc, key, use_segment_table) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
elif operation == UNION:
sgmntlist = pycbc_glue.segments.segmentlist([])
for key in keys:
sgmntlist |= find_segments(indoc, key, use_segment_table) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
elif operation == DIFF:
sgmntlist = find_segments(indoc, keys[0], use_segment_table)
for key in keys[1:]:
sgmntlist -= find_segments(indoc, key, use_segment_table) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
else:
raise NameError('%s is not a known operation (intersect, union or diff)' % operation)
# Add a segment definer and segments
seg_def_id = add_to_segment_definer(outdoc, proc_id, '', result_name, 1)
if use_segment_table:
add_to_segment(outdoc, proc_id, seg_def_id, sgmntlist) # depends on [control=['if'], data=[]]
else:
add_to_segment_summary(outdoc, proc_id, seg_def_id, sgmntlist)
return (outdoc, abs(sgmntlist)) |
def invocation_environment_dict(self):
    """A dict to use as this _Executable's execution environment.
    This isn't made into an "algebraic" field because its contents (the keys of the dict) are
    generally known to the specific class which is overriding this property. Implementations of this
    property can then make use of the data in the algebraic fields to populate this dict.
    :rtype: dict of string -> string
    """
    # The dynamic-linker search-path variable differs per platform.
    platform_lib_vars = {
        'darwin': 'DYLD_LIBRARY_PATH',
        'linux': 'LD_LIBRARY_PATH',
    }
    lib_var_name = self._platform.resolve_for_enum_variant(platform_lib_vars)
    env = {'PATH': create_path_env_var(self.path_entries)}
    env[lib_var_name] = create_path_env_var(self.runtime_library_dirs)
    return env
constant[A dict to use as this _Executable's execution environment.
This isn't made into an "algebraic" field because its contents (the keys of the dict) are
generally known to the specific class which is overriding this property. Implementations of this
property can then make use of the data in the algebraic fields to populate this dict.
:rtype: dict of string -> string
]
variable[lib_env_var] assign[=] call[name[self]._platform.resolve_for_enum_variant, parameter[dictionary[[<ast.Constant object at 0x7da1b1e8ebf0>, <ast.Constant object at 0x7da1b1e8c190>], [<ast.Constant object at 0x7da1b1e03070>, <ast.Constant object at 0x7da1b1e01270>]]]]
return[dictionary[[<ast.Constant object at 0x7da1b1e00bb0>, <ast.Name object at 0x7da1b1e00130>], [<ast.Call object at 0x7da1b1e038e0>, <ast.Call object at 0x7da1b1e02290>]]] | keyword[def] identifier[invocation_environment_dict] ( identifier[self] ):
literal[string]
identifier[lib_env_var] = identifier[self] . identifier[_platform] . identifier[resolve_for_enum_variant] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
keyword[return] {
literal[string] : identifier[create_path_env_var] ( identifier[self] . identifier[path_entries] ),
identifier[lib_env_var] : identifier[create_path_env_var] ( identifier[self] . identifier[runtime_library_dirs] ),
} | def invocation_environment_dict(self):
"""A dict to use as this _Executable's execution environment.
This isn't made into an "algebraic" field because its contents (the keys of the dict) are
generally known to the specific class which is overriding this property. Implementations of this
property can then make use of the data in the algebraic fields to populate this dict.
:rtype: dict of string -> string
"""
lib_env_var = self._platform.resolve_for_enum_variant({'darwin': 'DYLD_LIBRARY_PATH', 'linux': 'LD_LIBRARY_PATH'})
return {'PATH': create_path_env_var(self.path_entries), lib_env_var: create_path_env_var(self.runtime_library_dirs)} |
def parse_www_authenticate_header(value, on_update=None):
    """Parse an HTTP WWW-Authenticate header into a
    :class:`~werkzeug.datastructures.WWWAuthenticate` object.
    :param value: a WWW-Authenticate header to parse.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.WWWAuthenticate`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
    """
    if not value:
        # Empty / missing header: return an empty auth object.
        return WWWAuthenticate(on_update=on_update)
    try:
        # Header shape is "<scheme> <params>"; split on first whitespace.
        scheme, params = value.split(None, 1)
        scheme = scheme.lower()
    except (ValueError, AttributeError):
        # No parameters present (or value is not a proper string):
        # treat the whole value as the auth scheme.
        return WWWAuthenticate(value.strip().lower(), on_update=on_update)
    return WWWAuthenticate(scheme, parse_dict_header(params), on_update)
constant[Parse an HTTP WWW-Authenticate header into a
:class:`~werkzeug.datastructures.WWWAuthenticate` object.
:param value: a WWW-Authenticate header to parse.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.WWWAuthenticate`
object is changed.
:return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
]
if <ast.UnaryOp object at 0x7da18bccb8b0> begin[:]
return[call[name[WWWAuthenticate], parameter[]]]
<ast.Try object at 0x7da20e9b2b30>
return[call[name[WWWAuthenticate], parameter[name[auth_type], call[name[parse_dict_header], parameter[name[auth_info]]], name[on_update]]]] | keyword[def] identifier[parse_www_authenticate_header] ( identifier[value] , identifier[on_update] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[value] :
keyword[return] identifier[WWWAuthenticate] ( identifier[on_update] = identifier[on_update] )
keyword[try] :
identifier[auth_type] , identifier[auth_info] = identifier[value] . identifier[split] ( keyword[None] , literal[int] )
identifier[auth_type] = identifier[auth_type] . identifier[lower] ()
keyword[except] ( identifier[ValueError] , identifier[AttributeError] ):
keyword[return] identifier[WWWAuthenticate] ( identifier[value] . identifier[strip] (). identifier[lower] (), identifier[on_update] = identifier[on_update] )
keyword[return] identifier[WWWAuthenticate] ( identifier[auth_type] , identifier[parse_dict_header] ( identifier[auth_info] ), identifier[on_update] ) | def parse_www_authenticate_header(value, on_update=None):
"""Parse an HTTP WWW-Authenticate header into a
:class:`~werkzeug.datastructures.WWWAuthenticate` object.
:param value: a WWW-Authenticate header to parse.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.WWWAuthenticate`
object is changed.
:return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
"""
if not value:
return WWWAuthenticate(on_update=on_update) # depends on [control=['if'], data=[]]
try:
(auth_type, auth_info) = value.split(None, 1)
auth_type = auth_type.lower() # depends on [control=['try'], data=[]]
except (ValueError, AttributeError):
return WWWAuthenticate(value.strip().lower(), on_update=on_update) # depends on [control=['except'], data=[]]
return WWWAuthenticate(auth_type, parse_dict_header(auth_info), on_update) |
def make_agent() -> EcommerceAgent:
    """Build and return an Ecommerce agent backed by a tfidf retrieval skill.

    Returns:
        agent: created Ecommerce agent
    """
    retrieval_skill = build_model(find_config('tfidf_retrieve'))
    return EcommerceAgent(skills=[retrieval_skill])
constant[Make an agent
Returns:
agent: created Ecommerce agent
]
variable[config_path] assign[=] call[name[find_config], parameter[constant[tfidf_retrieve]]]
variable[skill] assign[=] call[name[build_model], parameter[name[config_path]]]
variable[agent] assign[=] call[name[EcommerceAgent], parameter[]]
return[name[agent]] | keyword[def] identifier[make_agent] ()-> identifier[EcommerceAgent] :
literal[string]
identifier[config_path] = identifier[find_config] ( literal[string] )
identifier[skill] = identifier[build_model] ( identifier[config_path] )
identifier[agent] = identifier[EcommerceAgent] ( identifier[skills] =[ identifier[skill] ])
keyword[return] identifier[agent] | def make_agent() -> EcommerceAgent:
"""Make an agent
Returns:
agent: created Ecommerce agent
"""
config_path = find_config('tfidf_retrieve')
skill = build_model(config_path)
agent = EcommerceAgent(skills=[skill])
return agent |
def config_filename(filename):
    """
    Return the full path of the first occurrence of *filename* inside one
    of the configuration folders, or *filename* unchanged when it is an
    absolute path or cannot be found in any folder.

    * It is useful for files that are not config-formatted (e.g. hosts
      files, json, etc.) that will be read using other mechanisms.
    """
    global _ETC_PATHS
    if filename.startswith('/'):
        # Absolute paths are taken as-is.
        _LOGGER.info("using absolute path for filename \"%s\"" % filename)
        return filename
    import os.path
    for folder in _ETC_PATHS:
        candidate = "%s/%s" % (folder, filename)
        if not os.path.isfile(candidate):
            continue
        resolved = os.path.realpath(candidate)
        _LOGGER.info("using path \"%s\" for filename \"%s\"" % (resolved, filename))
        return resolved
    # Not found in any configuration folder: fall back to the bare name.
    _LOGGER.info("using path \"%s\" for filename \"%s\"" % (filename, filename))
    return filename
constant[
Obtains the first filename found that is included in one of the configuration folders.
This function returs the full path for the file.
* It is useful for files that are not config-formatted (e.g. hosts files, json, etc.)
that will be read using other mechanisms
]
<ast.Global object at 0x7da20c6aaf50>
if call[name[filename].startswith, parameter[constant[/]]] begin[:]
call[name[_LOGGER].info, parameter[binary_operation[constant[using absolute path for filename "%s"] <ast.Mod object at 0x7da2590d6920> name[filename]]]]
return[name[filename]]
import module[os.path]
for taget[name[fpath]] in starred[name[_ETC_PATHS]] begin[:]
variable[current_path] assign[=] binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6aa080>, <ast.Name object at 0x7da20c6ab8b0>]]]
if call[name[os].path.isfile, parameter[name[current_path]]] begin[:]
variable[current_path] assign[=] call[name[os].path.realpath, parameter[name[current_path]]]
call[name[_LOGGER].info, parameter[binary_operation[constant[using path "%s" for filename "%s"] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6aae00>, <ast.Name object at 0x7da20c6aa0b0>]]]]]
return[name[current_path]]
call[name[_LOGGER].info, parameter[binary_operation[constant[using path "%s" for filename "%s"] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2054a46a0>, <ast.Name object at 0x7da2054a7d00>]]]]]
return[name[filename]] | keyword[def] identifier[config_filename] ( identifier[filename] ):
literal[string]
keyword[global] identifier[_ETC_PATHS]
keyword[if] identifier[filename] . identifier[startswith] ( literal[string] ):
identifier[_LOGGER] . identifier[info] ( literal[string] % identifier[filename] )
keyword[return] identifier[filename]
keyword[import] identifier[os] . identifier[path]
keyword[for] identifier[fpath] keyword[in] identifier[_ETC_PATHS] :
identifier[current_path] = literal[string] %( identifier[fpath] , identifier[filename] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[current_path] ):
identifier[current_path] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[current_path] )
identifier[_LOGGER] . identifier[info] ( literal[string] %( identifier[current_path] , identifier[filename] ))
keyword[return] identifier[current_path]
identifier[_LOGGER] . identifier[info] ( literal[string] %( identifier[filename] , identifier[filename] ))
keyword[return] identifier[filename] | def config_filename(filename):
"""
Obtains the first filename found that is included in one of the configuration folders.
This function returs the full path for the file.
* It is useful for files that are not config-formatted (e.g. hosts files, json, etc.)
that will be read using other mechanisms
"""
global _ETC_PATHS
if filename.startswith('/'):
_LOGGER.info('using absolute path for filename "%s"' % filename)
return filename # depends on [control=['if'], data=[]]
import os.path
for fpath in _ETC_PATHS:
current_path = '%s/%s' % (fpath, filename)
if os.path.isfile(current_path):
current_path = os.path.realpath(current_path)
_LOGGER.info('using path "%s" for filename "%s"' % (current_path, filename))
return current_path # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fpath']]
_LOGGER.info('using path "%s" for filename "%s"' % (filename, filename))
return filename |
def set_regionfiles(self, filenames):
    """
    This method directly sets the region files for this instance to use.
    It assumes the filenames are in the form r.<x-digit>.<z-digit>.<extension>

    Filenames that do not match the expected pattern are skipped instead of
    being registered under stale coordinates.
    """
    for filename in filenames:
        # Assume that filenames have the name r.<x-digit>.<z-digit>.<extension>
        m = re.match(r"r.(\-?\d+).(\-?\d+)."+self.extension, os.path.basename(filename))
        if m is None:
            # Unrecognized filename (e.g. "r.0.-1 copy.mca"): skip it.
            # BUG FIX: previously this branch fell through with `pass`, so the
            # file was stored under the (x, z) of the previous match -- or a
            # NameError was raised if the very first filename did not match.
            # TODO: log to stderr using logging facility.
            continue
        x = int(m.group(1))
        z = int(m.group(2))
        self.regionfiles[(x, z)] = filename
constant[
This method directly sets the region files for this instance to use.
It assumes the filenames are in the form r.<x-digit>.<z-digit>.<extension>
]
for taget[name[filename]] in starred[name[filenames]] begin[:]
variable[m] assign[=] call[name[re].match, parameter[binary_operation[constant[r.(\-?\d+).(\-?\d+).] + name[self].extension], call[name[os].path.basename, parameter[name[filename]]]]]
if name[m] begin[:]
variable[x] assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[1]]]]]
variable[z] assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[2]]]]]
call[name[self].regionfiles][tuple[[<ast.Name object at 0x7da18c4cfc70>, <ast.Name object at 0x7da18c4cf850>]]] assign[=] name[filename] | keyword[def] identifier[set_regionfiles] ( identifier[self] , identifier[filenames] ):
literal[string]
keyword[for] identifier[filename] keyword[in] identifier[filenames] :
identifier[m] = identifier[re] . identifier[match] ( literal[string] + identifier[self] . identifier[extension] , identifier[os] . identifier[path] . identifier[basename] ( identifier[filename] ))
keyword[if] identifier[m] :
identifier[x] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] ))
identifier[z] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] ))
keyword[else] :
keyword[pass]
identifier[self] . identifier[regionfiles] [( identifier[x] , identifier[z] )]= identifier[filename] | def set_regionfiles(self, filenames):
"""
This method directly sets the region files for this instance to use.
It assumes the filenames are in the form r.<x-digit>.<z-digit>.<extension>
"""
for filename in filenames:
# Assume that filenames have the name r.<x-digit>.<z-digit>.<extension>
m = re.match('r.(\\-?\\d+).(\\-?\\d+).' + self.extension, os.path.basename(filename))
if m:
x = int(m.group(1))
z = int(m.group(2)) # depends on [control=['if'], data=[]]
else:
# Only raised if a .mca of .mcr file exists which does not comply to the
# r.<x-digit>.<z-digit>.<extension> filename format. This may raise false
# errors if a copy is made, e.g. "r.0.-1 copy.mca". If this is an issue, override
# get_filenames(). In most cases, it is an error, and we like to raise that.
# Changed, no longer raise error, because we want to continue the loop.
# raise UnknownWorldFormat("Unrecognized filename format %s" % os.path.basename(filename))
# TODO: log to stderr using logging facility.
pass
self.regionfiles[x, z] = filename # depends on [control=['for'], data=['filename']] |
def write(self, b):
    '''Write bytes *b* to the remote serial port via MAVLink SERIAL_CONTROL.

    The data is sent in chunks of at most 70 bytes, each zero-padded to the
    fixed 70-byte SERIAL_CONTROL payload size.
    '''
    from . import mavutil
    # Log the outgoing data at debug level 2.
    # NOTE(review): ord(b[0]) assumes *b* is non-empty and indexable to
    # single characters (Python 2 str semantics) -- confirm with callers.
    self.debug("sending '%s' (0x%02x) of len %u\n" % (b, ord(b[0]), len(b)), 2)
    while len(b) > 0:
        n = len(b)
        if n > 70:
            # Cap each chunk at the 70-byte SERIAL_CONTROL data field.
            n = 70
        # Convert the chunk to a list of integer byte values.
        buf = [ord(x) for x in b[:n]]
        # Zero-pad up to the fixed 70-byte payload length.
        buf.extend([0]*(70-len(buf)))
        self.mav.mav.serial_control_send(self.port,
                                         mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE |
                                         mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND,
                                         0,
                                         0,
                                         n,
                                         buf)
b = b[n:] | def function[write, parameter[self, b]]:
constant[write some bytes]
from relative_module[None] import module[mavutil]
call[name[self].debug, parameter[binary_operation[constant[sending '%s' (0x%02x) of len %u
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e9b2d40>, <ast.Call object at 0x7da20e9b3100>, <ast.Call object at 0x7da20e9b3a60>]]], constant[2]]]
while compare[call[name[len], parameter[name[b]]] greater[>] constant[0]] begin[:]
variable[n] assign[=] call[name[len], parameter[name[b]]]
if compare[name[n] greater[>] constant[70]] begin[:]
variable[n] assign[=] constant[70]
variable[buf] assign[=] <ast.ListComp object at 0x7da20e9b2890>
call[name[buf].extend, parameter[binary_operation[list[[<ast.Constant object at 0x7da20e9b0a30>]] * binary_operation[constant[70] - call[name[len], parameter[name[buf]]]]]]]
call[name[self].mav.mav.serial_control_send, parameter[name[self].port, binary_operation[name[mavutil].mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE <ast.BitOr object at 0x7da2590d6aa0> name[mavutil].mavlink.SERIAL_CONTROL_FLAG_RESPOND], constant[0], constant[0], name[n], name[buf]]]
variable[b] assign[=] call[name[b]][<ast.Slice object at 0x7da20e9b0040>] | keyword[def] identifier[write] ( identifier[self] , identifier[b] ):
literal[string]
keyword[from] . keyword[import] identifier[mavutil]
identifier[self] . identifier[debug] ( literal[string] %( identifier[b] , identifier[ord] ( identifier[b] [ literal[int] ]), identifier[len] ( identifier[b] )), literal[int] )
keyword[while] identifier[len] ( identifier[b] )> literal[int] :
identifier[n] = identifier[len] ( identifier[b] )
keyword[if] identifier[n] > literal[int] :
identifier[n] = literal[int]
identifier[buf] =[ identifier[ord] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[b] [: identifier[n] ]]
identifier[buf] . identifier[extend] ([ literal[int] ]*( literal[int] - identifier[len] ( identifier[buf] )))
identifier[self] . identifier[mav] . identifier[mav] . identifier[serial_control_send] ( identifier[self] . identifier[port] ,
identifier[mavutil] . identifier[mavlink] . identifier[SERIAL_CONTROL_FLAG_EXCLUSIVE] |
identifier[mavutil] . identifier[mavlink] . identifier[SERIAL_CONTROL_FLAG_RESPOND] ,
literal[int] ,
literal[int] ,
identifier[n] ,
identifier[buf] )
identifier[b] = identifier[b] [ identifier[n] :] | def write(self, b):
"""write some bytes"""
from . import mavutil
self.debug("sending '%s' (0x%02x) of len %u\n" % (b, ord(b[0]), len(b)), 2)
while len(b) > 0:
n = len(b)
if n > 70:
n = 70 # depends on [control=['if'], data=['n']]
buf = [ord(x) for x in b[:n]]
buf.extend([0] * (70 - len(buf)))
self.mav.mav.serial_control_send(self.port, mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE | mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND, 0, 0, n, buf)
b = b[n:] # depends on [control=['while'], data=[]] |
def begin(self):
    """Begin a transaction.

    Opens the journal file for writing.  Under normal operation the journal
    is deleted at the end of the transaction; if one is still present from a
    previous run, an interrupted transaction is assumed and rolled back
    before the new journal is created.

    Raises:
        Exception: if a transaction is already active (nested begin is not
            supported).
    """
    # `is not None` is the idiomatic identity test (was `!= None`).
    if self.journal is not None:
        raise Exception('Storage is already active, nested begin not supported')
    # under normal operation journal is deleted at end of transaction
    # if it does exist we need to roll back
    if os.path.isfile(self.j_file):
        self.rollback()
    self.journal = open(self.j_file, 'w')
constant[ Begin a transaction ]
if compare[name[self].journal not_equal[!=] constant[None]] begin[:]
<ast.Raise object at 0x7da18eb55b40>
if call[name[os].path.isfile, parameter[name[self].j_file]] begin[:]
call[name[self].rollback, parameter[]]
name[self].journal assign[=] call[name[open], parameter[name[self].j_file, constant[w]]] | keyword[def] identifier[begin] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[journal] != keyword[None] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[self] . identifier[j_file] ): identifier[self] . identifier[rollback] ()
identifier[self] . identifier[journal] = identifier[open] ( identifier[self] . identifier[j_file] , literal[string] ) | def begin(self):
""" Begin a transaction """
if self.journal != None:
raise Exception('Storage is already active, nested begin not supported') # depends on [control=['if'], data=[]]
# under normal operation journal is deleted at end of transaction
# if it does exist we need to roll back
if os.path.isfile(self.j_file):
self.rollback() # depends on [control=['if'], data=[]]
self.journal = open(self.j_file, 'w') |
def setLayout(self, value):
    """Store the worksheet layout, ordered by each entry's 'position' key.

    :param value: iterable of layout dicts; each must carry a 'position' key
    """
    ordered = sorted(value, key=lambda entry: entry['position'])
    self.getField('Layout').set(self, ordered)
constant[
Sets the worksheet layout, keeping it sorted by position
:param value: the layout to set
]
variable[new_layout] assign[=] call[name[sorted], parameter[name[value]]]
call[call[name[self].getField, parameter[constant[Layout]]].set, parameter[name[self], name[new_layout]]] | keyword[def] identifier[setLayout] ( identifier[self] , identifier[value] ):
literal[string]
identifier[new_layout] = identifier[sorted] ( identifier[value] , identifier[key] = keyword[lambda] identifier[k] : identifier[k] [ literal[string] ])
identifier[self] . identifier[getField] ( literal[string] ). identifier[set] ( identifier[self] , identifier[new_layout] ) | def setLayout(self, value):
"""
Sets the worksheet layout, keeping it sorted by position
:param value: the layout to set
"""
new_layout = sorted(value, key=lambda k: k['position'])
self.getField('Layout').set(self, new_layout) |
def add_raw(self, raw):
    """
    Adds raw aggregation stage(s) to the query.

    :param raw: list of raw stages, or a dict holding a single raw stage
    :return: self, so calls can be chained
    """
    # isinstance (rather than `type(raw) ==`) is idiomatic and also accepts
    # list/dict subclasses; a value cannot be both, so elif is equivalent.
    if isinstance(raw, list):
        self._q += raw
    elif isinstance(raw, dict):
        self._q.append(raw)
    return self
constant[
Adds row aggregation state at the query
:param raw: list of raw stages or a dict of raw stage
:return: The current object
]
if compare[call[name[type], parameter[name[raw]]] equal[==] name[list]] begin[:]
<ast.AugAssign object at 0x7da2054a7100>
if compare[call[name[type], parameter[name[raw]]] equal[==] name[dict]] begin[:]
call[name[self]._q.append, parameter[name[raw]]]
return[name[self]] | keyword[def] identifier[add_raw] ( identifier[self] , identifier[raw] ):
literal[string]
keyword[if] identifier[type] ( identifier[raw] )== identifier[list] :
identifier[self] . identifier[_q] += identifier[raw]
keyword[if] identifier[type] ( identifier[raw] )== identifier[dict] :
identifier[self] . identifier[_q] . identifier[append] ( identifier[raw] )
keyword[return] identifier[self] | def add_raw(self, raw):
"""
Adds row aggregation state at the query
:param raw: list of raw stages or a dict of raw stage
:return: The current object
"""
if type(raw) == list:
self._q += raw # depends on [control=['if'], data=[]]
if type(raw) == dict:
self._q.append(raw) # depends on [control=['if'], data=[]]
return self |
def image2working(self, i):
    """Convert image(s) *i* from the image color space into the configured
    working color space."""
    src, dst = self.image_space, self.working_space
    return self.colorspace.convert(src, dst, i)
constant[Transform images i provided into the specified working
color space.]
return[call[name[self].colorspace.convert, parameter[name[self].image_space, name[self].working_space, name[i]]]] | keyword[def] identifier[image2working] ( identifier[self] , identifier[i] ):
literal[string]
keyword[return] identifier[self] . identifier[colorspace] . identifier[convert] ( identifier[self] . identifier[image_space] ,
identifier[self] . identifier[working_space] , identifier[i] ) | def image2working(self, i):
"""Transform images i provided into the specified working
color space."""
return self.colorspace.convert(self.image_space, self.working_space, i) |
def change_password(self, new_password, email):
    """Change the account's login password.

    :param new_password: the new login password to set for the account
    :param email: the account's current email address
    :return: the result of sending the change-password request element
    """
    log.info("[+] Changing the password of the account")
    request = account.ChangePasswordRequest(self.password, new_password, email, self.username)
    return self._send_xmpp_element(request)
constant[
Changes the login password
:param new_password: The new login password to set for the account
:param email: The current email of the account
]
call[name[log].info, parameter[constant[[+] Changing the password of the account]]]
return[call[name[self]._send_xmpp_element, parameter[call[name[account].ChangePasswordRequest, parameter[name[self].password, name[new_password], name[email], name[self].username]]]]] | keyword[def] identifier[change_password] ( identifier[self] , identifier[new_password] , identifier[email] ):
literal[string]
identifier[log] . identifier[info] ( literal[string] )
keyword[return] identifier[self] . identifier[_send_xmpp_element] ( identifier[account] . identifier[ChangePasswordRequest] ( identifier[self] . identifier[password] , identifier[new_password] , identifier[email] , identifier[self] . identifier[username] )) | def change_password(self, new_password, email):
"""
Changes the login password
:param new_password: The new login password to set for the account
:param email: The current email of the account
"""
log.info('[+] Changing the password of the account')
return self._send_xmpp_element(account.ChangePasswordRequest(self.password, new_password, email, self.username)) |
def list_servers(self, nicknames=None):
    """
    Resolve the given nickname(s) to a list of `ServerDefinition` objects.

    Each nickname may name a single server or a server group; groups are
    expanded recursively.  nicknames may be: None (all servers), a string
    with a single nickname, or a list of nicknames.

    Raises ValueError if a nickname matches neither a server nor a group.
    """
    # No nicknames (None or empty) means "all servers".
    if not nicknames:
        return self.list_all_servers()
    # Normalize a single nickname string to a one-element list.
    if isinstance(nicknames, six.string_types):
        nicknames = [nicknames]
    sd_list = []        # resulting ServerDefinition objects
    sd_nick_list = []   # nicknames already added during group expansion
    for nickname in nicknames:
        if nickname in self._servers:
            # Direct server nickname.
            # NOTE(review): direct hits are not recorded in sd_nick_list, so
            # a server named both directly and via a group could appear
            # twice -- confirm whether that is intended.
            sd_list.append(self.get_server(nickname))
        elif nickname in self._server_groups:
            # Group nickname: recursively expand members (groups may nest),
            # de-duplicating by server nickname.
            for item_nick in self._server_groups[nickname]:
                for sd in self.list_servers(item_nick):
                    if sd.nickname not in sd_nick_list:
                        sd_nick_list.append(sd.nickname)
                        sd_list.append(sd)
        else:
            raise ValueError(
                "Server group or server nickname {0!r} not found in WBEM "
                "server definition file {1!r}".
                format(nickname, self._filepath))
return sd_list | def function[list_servers, parameter[self, nicknames]]:
constant[
Iterate through the servers of the server group with the specified
nicknames, or the single server with the specified nickname, and yield
a `ServerDefinition` object for each server.
nicknames may be: None, string defining a nickname or list of nicknames
]
if <ast.UnaryOp object at 0x7da204567370> begin[:]
return[call[name[self].list_all_servers, parameter[]]]
if call[name[isinstance], parameter[name[nicknames], name[six].string_types]] begin[:]
variable[nicknames] assign[=] list[[<ast.Name object at 0x7da204567ac0>]]
variable[sd_list] assign[=] list[[]]
variable[sd_nick_list] assign[=] list[[]]
for taget[name[nickname]] in starred[name[nicknames]] begin[:]
if compare[name[nickname] in name[self]._servers] begin[:]
call[name[sd_list].append, parameter[call[name[self].get_server, parameter[name[nickname]]]]]
return[name[sd_list]] | keyword[def] identifier[list_servers] ( identifier[self] , identifier[nicknames] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[nicknames] :
keyword[return] identifier[self] . identifier[list_all_servers] ()
keyword[if] identifier[isinstance] ( identifier[nicknames] , identifier[six] . identifier[string_types] ):
identifier[nicknames] =[ identifier[nicknames] ]
identifier[sd_list] =[]
identifier[sd_nick_list] =[]
keyword[for] identifier[nickname] keyword[in] identifier[nicknames] :
keyword[if] identifier[nickname] keyword[in] identifier[self] . identifier[_servers] :
identifier[sd_list] . identifier[append] ( identifier[self] . identifier[get_server] ( identifier[nickname] ))
keyword[elif] identifier[nickname] keyword[in] identifier[self] . identifier[_server_groups] :
keyword[for] identifier[item_nick] keyword[in] identifier[self] . identifier[_server_groups] [ identifier[nickname] ]:
keyword[for] identifier[sd] keyword[in] identifier[self] . identifier[list_servers] ( identifier[item_nick] ):
keyword[if] identifier[sd] . identifier[nickname] keyword[not] keyword[in] identifier[sd_nick_list] :
identifier[sd_nick_list] . identifier[append] ( identifier[sd] . identifier[nickname] )
identifier[sd_list] . identifier[append] ( identifier[sd] )
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] .
identifier[format] ( identifier[nickname] , identifier[self] . identifier[_filepath] ))
keyword[return] identifier[sd_list] | def list_servers(self, nicknames=None):
"""
Iterate through the servers of the server group with the specified
nicknames, or the single server with the specified nickname, and yield
a `ServerDefinition` object for each server.
nicknames may be: None, string defining a nickname or list of nicknames
"""
if not nicknames:
return self.list_all_servers() # depends on [control=['if'], data=[]]
if isinstance(nicknames, six.string_types):
nicknames = [nicknames] # depends on [control=['if'], data=[]]
sd_list = []
sd_nick_list = []
for nickname in nicknames:
if nickname in self._servers:
sd_list.append(self.get_server(nickname)) # depends on [control=['if'], data=['nickname']]
elif nickname in self._server_groups:
for item_nick in self._server_groups[nickname]:
for sd in self.list_servers(item_nick):
if sd.nickname not in sd_nick_list:
sd_nick_list.append(sd.nickname)
sd_list.append(sd) # depends on [control=['if'], data=['sd_nick_list']] # depends on [control=['for'], data=['sd']] # depends on [control=['for'], data=['item_nick']] # depends on [control=['if'], data=['nickname']]
else:
raise ValueError('Server group or server nickname {0!r} not found in WBEM server definition file {1!r}'.format(nickname, self._filepath)) # depends on [control=['for'], data=['nickname']]
return sd_list |
def _from_dict(cls, _dict):
    """Initialize a MessageOutputDebug object from a json dictionary.

    Recognized keys (all optional; unrecognized keys are ignored):
      - 'nodes_visited': list of dicts, each converted via
        DialogNodesVisited._from_dict
      - 'log_messages': list of dicts, each converted via
        DialogLogMessage._from_dict
      - 'branch_exited': passed through unchanged
      - 'branch_exited_reason': passed through unchanged
    """
    args = {}
    if 'nodes_visited' in _dict:
        # Deserialize each visited-node entry into its model object.
        args['nodes_visited'] = [
            DialogNodesVisited._from_dict(x)
            for x in (_dict.get('nodes_visited'))
        ]
    if 'log_messages' in _dict:
        # Deserialize each log-message entry into its model object.
        args['log_messages'] = [
            DialogLogMessage._from_dict(x)
            for x in (_dict.get('log_messages'))
        ]
    if 'branch_exited' in _dict:
        args['branch_exited'] = _dict.get('branch_exited')
    if 'branch_exited_reason' in _dict:
        args['branch_exited_reason'] = _dict.get('branch_exited_reason')
return cls(**args) | def function[_from_dict, parameter[cls, _dict]]:
constant[Initialize a MessageOutputDebug object from a json dictionary.]
variable[args] assign[=] dictionary[[], []]
if compare[constant[nodes_visited] in name[_dict]] begin[:]
call[name[args]][constant[nodes_visited]] assign[=] <ast.ListComp object at 0x7da1b1b46e00>
if compare[constant[log_messages] in name[_dict]] begin[:]
call[name[args]][constant[log_messages]] assign[=] <ast.ListComp object at 0x7da1b1b44460>
if compare[constant[branch_exited] in name[_dict]] begin[:]
call[name[args]][constant[branch_exited]] assign[=] call[name[_dict].get, parameter[constant[branch_exited]]]
if compare[constant[branch_exited_reason] in name[_dict]] begin[:]
call[name[args]][constant[branch_exited_reason]] assign[=] call[name[_dict].get, parameter[constant[branch_exited_reason]]]
return[call[name[cls], parameter[]]] | keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ):
literal[string]
identifier[args] ={}
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]=[
identifier[DialogNodesVisited] . identifier[_from_dict] ( identifier[x] )
keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] ))
]
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]=[
identifier[DialogLogMessage] . identifier[_from_dict] ( identifier[x] )
keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] ))
]
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[return] identifier[cls] (** identifier[args] ) | def _from_dict(cls, _dict):
"""Initialize a MessageOutputDebug object from a json dictionary."""
args = {}
if 'nodes_visited' in _dict:
args['nodes_visited'] = [DialogNodesVisited._from_dict(x) for x in _dict.get('nodes_visited')] # depends on [control=['if'], data=['_dict']]
if 'log_messages' in _dict:
args['log_messages'] = [DialogLogMessage._from_dict(x) for x in _dict.get('log_messages')] # depends on [control=['if'], data=['_dict']]
if 'branch_exited' in _dict:
args['branch_exited'] = _dict.get('branch_exited') # depends on [control=['if'], data=['_dict']]
if 'branch_exited_reason' in _dict:
args['branch_exited_reason'] = _dict.get('branch_exited_reason') # depends on [control=['if'], data=['_dict']]
return cls(**args) |
def query_more(self, next_records_identifier, identifier_is_url=False, **kwargs):
    """
    Retrieves more results from a query that returned more results
    than the batch maximum. Returns a dict decoded from the Salesforce
    response JSON payload.

    Extra keyword arguments are passed through to ``requests.get``.
    Raises an HTTP error (via ``raise_for_status``) for non-2xx responses.

    :param next_records_identifier: either the Id of the next Salesforce
                                    object in the result, or a URL to the
                                    next record in the result.
    :param identifier_is_url: True if `next_records_identifier` should be
                              treated as a URL, False if
                              `next_records_identifer` should be treated as
                              an Id.
    """
    if identifier_is_url:
        # Don't use `self.base_url` here because the full URI is provided
        url = (u'https://{instance}{next_record_url}'
               .format(instance=self.hostname,
                       next_record_url=next_records_identifier))
    else:
        # Build the URL from the normalized query base plus the record Id.
        url = self._get_norm_query_url() + '{next_record_id}'
        url = url.format(next_record_id=next_records_identifier)
    response = requests.get(url, headers=self._get_rest_headers(), **kwargs)
    # Surface HTTP errors (e.g. an expired query cursor) to the caller.
    response.raise_for_status()
return response.json() | def function[query_more, parameter[self, next_records_identifier, identifier_is_url]]:
constant[
Retrieves more results from a query that returned more results
than the batch maximum. Returns a dict decoded from the Salesforce
response JSON payload.
:param next_records_identifier: either the Id of the next Salesforce
object in the result, or a URL to the
next record in the result.
:param identifier_is_url: True if `next_records_identifier` should be
treated as a URL, False if
`next_records_identifer` should be treated as
an Id.
]
if name[identifier_is_url] begin[:]
variable[url] assign[=] call[constant[https://{instance}{next_record_url}].format, parameter[]]
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
call[name[response].raise_for_status, parameter[]]
return[call[name[response].json, parameter[]]] | keyword[def] identifier[query_more] ( identifier[self] , identifier[next_records_identifier] , identifier[identifier_is_url] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[identifier_is_url] :
identifier[url] =( literal[string]
. identifier[format] ( identifier[instance] = identifier[self] . identifier[hostname] ,
identifier[next_record_url] = identifier[next_records_identifier] ))
keyword[else] :
identifier[url] = identifier[self] . identifier[_get_norm_query_url] ()+ literal[string]
identifier[url] = identifier[url] . identifier[format] ( identifier[next_record_id] = identifier[next_records_identifier] )
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[self] . identifier[_get_rest_headers] (),** identifier[kwargs] )
identifier[response] . identifier[raise_for_status] ()
keyword[return] identifier[response] . identifier[json] () | def query_more(self, next_records_identifier, identifier_is_url=False, **kwargs):
"""
Retrieves more results from a query that returned more results
than the batch maximum. Returns a dict decoded from the Salesforce
response JSON payload.
:param next_records_identifier: either the Id of the next Salesforce
object in the result, or a URL to the
next record in the result.
:param identifier_is_url: True if `next_records_identifier` should be
treated as a URL, False if
`next_records_identifer` should be treated as
an Id.
"""
if identifier_is_url:
# Don't use `self.base_url` here because the full URI is provided
url = u'https://{instance}{next_record_url}'.format(instance=self.hostname, next_record_url=next_records_identifier) # depends on [control=['if'], data=[]]
else:
url = self._get_norm_query_url() + '{next_record_id}'
url = url.format(next_record_id=next_records_identifier)
response = requests.get(url, headers=self._get_rest_headers(), **kwargs)
response.raise_for_status()
return response.json() |
def calc_filenames(self, is_correct: bool, actual_output: bool, threshold=0.5) -> list:
    """Find a list of files with the given classification."""
    selected = []
    for output, target, filename in zip(self.outputs, self.targets, self.filenames):
        predicted = output > threshold
        # "Correct" means the thresholded prediction agrees with the target.
        matches_correctness = (predicted == bool(target)) == is_correct
        if matches_correctness and actual_output == bool(predicted):
            selected.append(filename)
    return selected
constant[Find a list of files with the given classification]
return[<ast.ListComp object at 0x7da18ede5000>] | keyword[def] identifier[calc_filenames] ( identifier[self] , identifier[is_correct] : identifier[bool] , identifier[actual_output] : identifier[bool] , identifier[threshold] = literal[int] )-> identifier[list] :
literal[string]
keyword[return] [
identifier[filename]
keyword[for] identifier[output] , identifier[target] , identifier[filename] keyword[in] identifier[zip] ( identifier[self] . identifier[outputs] , identifier[self] . identifier[targets] , identifier[self] . identifier[filenames] )
keyword[if] (( identifier[output] > identifier[threshold] )== identifier[bool] ( identifier[target] ))== identifier[is_correct] keyword[and] identifier[actual_output] == identifier[bool] ( identifier[output] > identifier[threshold] )
] | def calc_filenames(self, is_correct: bool, actual_output: bool, threshold=0.5) -> list:
"""Find a list of files with the given classification"""
return [filename for (output, target, filename) in zip(self.outputs, self.targets, self.filenames) if ((output > threshold) == bool(target)) == is_correct and actual_output == bool(output > threshold)] |
def cherry_pick(self):
    """ git cherry-pick -x <commit_sha1> """
    # Build the git command for the configured commit.
    cmd = ["git", "cherry-pick", "-x", self.commit_sha1]
    try:
        self.run_cmd(cmd)
    except subprocess.CalledProcessError as err:
        # On failure, echo the failing commit and the captured git output
        # for the user (the project-specific exception is raised below).
        click.echo(f"Error cherry-pick {self.commit_sha1}.")
        click.echo(err.output)
raise CherryPickException(f"Error cherry-pick {self.commit_sha1}.") | def function[cherry_pick, parameter[self]]:
constant[ git cherry-pick -x <commit_sha1> ]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da18eb55990>, <ast.Constant object at 0x7da18eb54520>, <ast.Constant object at 0x7da18eb55030>, <ast.Attribute object at 0x7da18eb541f0>]]
<ast.Try object at 0x7da18eb57a90> | keyword[def] identifier[cherry_pick] ( identifier[self] ):
literal[string]
identifier[cmd] =[ literal[string] , literal[string] , literal[string] , identifier[self] . identifier[commit_sha1] ]
keyword[try] :
identifier[self] . identifier[run_cmd] ( identifier[cmd] )
keyword[except] identifier[subprocess] . identifier[CalledProcessError] keyword[as] identifier[err] :
identifier[click] . identifier[echo] ( literal[string] )
identifier[click] . identifier[echo] ( identifier[err] . identifier[output] )
keyword[raise] identifier[CherryPickException] ( literal[string] ) | def cherry_pick(self):
""" git cherry-pick -x <commit_sha1> """
cmd = ['git', 'cherry-pick', '-x', self.commit_sha1]
try:
self.run_cmd(cmd) # depends on [control=['try'], data=[]]
except subprocess.CalledProcessError as err:
click.echo(f'Error cherry-pick {self.commit_sha1}.')
click.echo(err.output)
raise CherryPickException(f'Error cherry-pick {self.commit_sha1}.') # depends on [control=['except'], data=['err']] |
def get_arg_dict(argv=None, prefix_list=None, type_hints=None):
    r"""
    Yet another way for parsing args
    CommandLine:
        python -m utool.util_arg --exec-get_arg_dict
        python -m utool.util_arg --test-get_arg_dict
    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_arg import *  # NOQA
        >>> import utool as ut
        >>> import shlex
        >>> argv = shlex.split('--test-show_name --name=IBEIS_PZ_0303 --db testdb3 --save "~/latex/crall-candidacy-2015/figures/IBEIS_PZ_0303.jpg" --dpath figures --caption="Shadowed" --figsize=11,3 --no-figtitle -t foo bar baz biz --notitle')
        >>> arg_dict = ut.get_arg_dict(argv, prefix_list=['--', '-'], type_hints={'t': list})
        >>> result = ut.repr2(arg_dict, nl=1)
        >>> # verify results
        >>> print(result)
        {
            'caption': 'Shadowed',
            'db': 'testdb3',
            'dpath': 'figures',
            'figsize': '11,3',
            'name': 'IBEIS_PZ_0303',
            'no-figtitle': True,
            'notitle': True,
            'save': '~/latex/crall-candidacy-2015/figures/IBEIS_PZ_0303.jpg',
            't': ['foo', 'bar', 'baz', 'biz'],
            'test-show_name': True,
        }
    """
    # FIX: avoid mutable default arguments (shared across calls); fall back
    # to the documented defaults here instead.
    if prefix_list is None:
        prefix_list = ['--']
    if type_hints is None:
        type_hints = {}
    if argv is None:
        argv = sys.argv
    arg_dict = {}

    def startswith_prefix(arg):
        # True if arg begins with any of the recognized option prefixes.
        return any([arg.startswith(prefix) for prefix in prefix_list])

    def argx_has_value(argv, argx):
        # An option carries a value either inline ("--key=value") or as the
        # following token when that token is not itself an option.
        if argv[argx].find('=') > -1:
            return True
        if argx + 1 < len(argv) and not startswith_prefix(argv[argx + 1]):
            return True
        return False

    def get_arg_value(argv, argx, argname):
        # Extract the value: inline "key=value" wins; otherwise take the
        # next token, or gather a list when a list type hint was given.
        if argv[argx].find('=') > -1:
            return '='.join(argv[argx].split('=')[1:])
        else:
            type_ = type_hints.get(argname, None)
            if type_ is None:
                return argv[argx + 1]
            else:
                return parse_arglist_hack(argx, argv=argv)

    for argx in range(len(argv)):
        arg = argv[argx]
        for prefix in prefix_list:
            if arg.startswith(prefix):
                argname = arg[len(prefix):]
                if argx_has_value(argv, argx):
                    if arg.find('=') > -1:
                        # Strip the "=value" suffix from the name.
                        argname = arg[len(prefix):arg.find('=')]
                    argvalue = get_arg_value(argv, argx, argname)
                    arg_dict[argname] = argvalue
                else:
                    # Bare flag with no value: record as boolean True.
                    arg_dict[argname] = True
                break
    return arg_dict
constant[
Yet another way for parsing args
CommandLine:
python -m utool.util_arg --exec-get_arg_dict
python -m utool.util_arg --test-get_arg_dict
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_arg import * # NOQA
>>> import utool as ut
>>> import shlex
>>> argv = shlex.split('--test-show_name --name=IBEIS_PZ_0303 --db testdb3 --save "~/latex/crall-candidacy-2015/figures/IBEIS_PZ_0303.jpg" --dpath figures --caption="Shadowed" --figsize=11,3 --no-figtitle -t foo bar baz biz --notitle')
>>> arg_dict = ut.get_arg_dict(argv, prefix_list=['--', '-'], type_hints={'t': list})
>>> result = ut.repr2(arg_dict, nl=1)
>>> # verify results
>>> print(result)
{
'caption': 'Shadowed',
'db': 'testdb3',
'dpath': 'figures',
'figsize': '11,3',
'name': 'IBEIS_PZ_0303',
'no-figtitle': True,
'notitle': True,
'save': '~/latex/crall-candidacy-2015/figures/IBEIS_PZ_0303.jpg',
't': ['foo', 'bar', 'baz', 'biz'],
'test-show_name': True,
}
]
if compare[name[argv] is constant[None]] begin[:]
variable[argv] assign[=] name[sys].argv
variable[arg_dict] assign[=] dictionary[[], []]
def function[startswith_prefix, parameter[arg]]:
return[call[name[any], parameter[<ast.ListComp object at 0x7da1b246ba60>]]]
def function[argx_has_value, parameter[argv, argx]]:
if compare[call[call[name[argv]][name[argx]].find, parameter[constant[=]]] greater[>] <ast.UnaryOp object at 0x7da1b246bb50>] begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da1b2469810> begin[:]
return[constant[True]]
return[constant[False]]
def function[get_arg_value, parameter[argv, argx, argname]]:
if compare[call[call[name[argv]][name[argx]].find, parameter[constant[=]]] greater[>] <ast.UnaryOp object at 0x7da1b2469780>] begin[:]
return[call[constant[=].join, parameter[call[call[call[name[argv]][name[argx]].split, parameter[constant[=]]]][<ast.Slice object at 0x7da1b246a950>]]]]
for taget[name[argx]] in starred[call[name[range], parameter[call[name[len], parameter[name[argv]]]]]] begin[:]
variable[arg] assign[=] call[name[argv]][name[argx]]
for taget[name[prefix]] in starred[name[prefix_list]] begin[:]
if call[name[arg].startswith, parameter[name[prefix]]] begin[:]
variable[argname] assign[=] call[name[arg]][<ast.Slice object at 0x7da1b2380ac0>]
if call[name[argx_has_value], parameter[name[argv], name[argx]]] begin[:]
if compare[call[name[arg].find, parameter[constant[=]]] greater[>] <ast.UnaryOp object at 0x7da1b2380910>] begin[:]
variable[argname] assign[=] call[name[arg]][<ast.Slice object at 0x7da1b253a9e0>]
variable[argvalue] assign[=] call[name[get_arg_value], parameter[name[argv], name[argx], name[argname]]]
call[name[arg_dict]][name[argname]] assign[=] name[argvalue]
break
return[name[arg_dict]] | keyword[def] identifier[get_arg_dict] ( identifier[argv] = keyword[None] , identifier[prefix_list] =[ literal[string] ], identifier[type_hints] ={}):
literal[string]
keyword[if] identifier[argv] keyword[is] keyword[None] :
identifier[argv] = identifier[sys] . identifier[argv]
identifier[arg_dict] ={}
keyword[def] identifier[startswith_prefix] ( identifier[arg] ):
keyword[return] identifier[any] ([ identifier[arg] . identifier[startswith] ( identifier[prefix] ) keyword[for] identifier[prefix] keyword[in] identifier[prefix_list] ])
keyword[def] identifier[argx_has_value] ( identifier[argv] , identifier[argx] ):
keyword[if] identifier[argv] [ identifier[argx] ]. identifier[find] ( literal[string] )>- literal[int] :
keyword[return] keyword[True]
keyword[if] identifier[argx] + literal[int] < identifier[len] ( identifier[argv] ) keyword[and] keyword[not] identifier[startswith_prefix] ( identifier[argv] [ identifier[argx] + literal[int] ]):
keyword[return] keyword[True]
keyword[return] keyword[False]
keyword[def] identifier[get_arg_value] ( identifier[argv] , identifier[argx] , identifier[argname] ):
keyword[if] identifier[argv] [ identifier[argx] ]. identifier[find] ( literal[string] )>- literal[int] :
keyword[return] literal[string] . identifier[join] ( identifier[argv] [ identifier[argx] ]. identifier[split] ( literal[string] )[ literal[int] :])
keyword[else] :
identifier[type_] = identifier[type_hints] . identifier[get] ( identifier[argname] , keyword[None] )
keyword[if] identifier[type_] keyword[is] keyword[None] :
keyword[return] identifier[argv] [ identifier[argx] + literal[int] ]
keyword[else] :
keyword[return] identifier[parse_arglist_hack] ( identifier[argx] , identifier[argv] = identifier[argv] )
keyword[for] identifier[argx] keyword[in] identifier[range] ( identifier[len] ( identifier[argv] )):
identifier[arg] = identifier[argv] [ identifier[argx] ]
keyword[for] identifier[prefix] keyword[in] identifier[prefix_list] :
keyword[if] identifier[arg] . identifier[startswith] ( identifier[prefix] ):
identifier[argname] = identifier[arg] [ identifier[len] ( identifier[prefix] ):]
keyword[if] identifier[argx_has_value] ( identifier[argv] , identifier[argx] ):
keyword[if] identifier[arg] . identifier[find] ( literal[string] )>- literal[int] :
identifier[argname] = identifier[arg] [ identifier[len] ( identifier[prefix] ): identifier[arg] . identifier[find] ( literal[string] )]
identifier[argvalue] = identifier[get_arg_value] ( identifier[argv] , identifier[argx] , identifier[argname] )
identifier[arg_dict] [ identifier[argname] ]= identifier[argvalue]
keyword[else] :
identifier[arg_dict] [ identifier[argname] ]= keyword[True]
keyword[break]
keyword[return] identifier[arg_dict] | def get_arg_dict(argv=None, prefix_list=['--'], type_hints={}):
"""
Yet another way for parsing args
CommandLine:
python -m utool.util_arg --exec-get_arg_dict
python -m utool.util_arg --test-get_arg_dict
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_arg import * # NOQA
>>> import utool as ut
>>> import shlex
>>> argv = shlex.split('--test-show_name --name=IBEIS_PZ_0303 --db testdb3 --save "~/latex/crall-candidacy-2015/figures/IBEIS_PZ_0303.jpg" --dpath figures --caption="Shadowed" --figsize=11,3 --no-figtitle -t foo bar baz biz --notitle')
>>> arg_dict = ut.get_arg_dict(argv, prefix_list=['--', '-'], type_hints={'t': list})
>>> result = ut.repr2(arg_dict, nl=1)
>>> # verify results
>>> print(result)
{
'caption': 'Shadowed',
'db': 'testdb3',
'dpath': 'figures',
'figsize': '11,3',
'name': 'IBEIS_PZ_0303',
'no-figtitle': True,
'notitle': True,
'save': '~/latex/crall-candidacy-2015/figures/IBEIS_PZ_0303.jpg',
't': ['foo', 'bar', 'baz', 'biz'],
'test-show_name': True,
}
"""
if argv is None:
argv = sys.argv # depends on [control=['if'], data=['argv']]
arg_dict = {}
def startswith_prefix(arg):
return any([arg.startswith(prefix) for prefix in prefix_list])
def argx_has_value(argv, argx):
# Check if has a value
if argv[argx].find('=') > -1:
return True # depends on [control=['if'], data=[]]
if argx + 1 < len(argv) and (not startswith_prefix(argv[argx + 1])):
return True # depends on [control=['if'], data=[]]
return False
def get_arg_value(argv, argx, argname):
if argv[argx].find('=') > -1:
return '='.join(argv[argx].split('=')[1:]) # depends on [control=['if'], data=[]]
else:
type_ = type_hints.get(argname, None)
if type_ is None:
return argv[argx + 1] # depends on [control=['if'], data=[]]
else:
return parse_arglist_hack(argx, argv=argv)
for argx in range(len(argv)):
arg = argv[argx]
for prefix in prefix_list:
if arg.startswith(prefix):
argname = arg[len(prefix):]
if argx_has_value(argv, argx):
if arg.find('=') > -1:
argname = arg[len(prefix):arg.find('=')] # depends on [control=['if'], data=[]]
argvalue = get_arg_value(argv, argx, argname)
arg_dict[argname] = argvalue # depends on [control=['if'], data=[]]
else:
arg_dict[argname] = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['prefix']] # depends on [control=['for'], data=['argx']]
return arg_dict |
def stacksize(self):
    """The maximum amount of stack space used by this code object.

    Computes the running sum of per-instruction stack effects, starting
    from an empty stack (depth 0), and returns the peak depth reached.

    Returns
    -------
    int
        The maximum stack depth; 0 when ``self.instrs`` is empty.
    """
    from itertools import accumulate, chain

    effects = (instr.stack_effect for instr in self.instrs)
    # Seed the running sum with 0 (the depth before any instruction runs),
    # matching the original scanl(op.add, 0, ...) behavior; this also makes
    # an empty instruction list yield a stacksize of 0 instead of raising.
    return max(accumulate(chain([0], effects)))
constant[The maximum amount of stack space used by this code object.
]
return[call[name[max], parameter[call[name[scanl], parameter[name[op].add, constant[0], call[name[map], parameter[call[name[op].attrgetter, parameter[constant[stack_effect]]], name[self].instrs]]]]]]] | keyword[def] identifier[stacksize] ( identifier[self] ):
literal[string]
keyword[return] identifier[max] ( identifier[scanl] (
identifier[op] . identifier[add] ,
literal[int] ,
identifier[map] ( identifier[op] . identifier[attrgetter] ( literal[string] ), identifier[self] . identifier[instrs] ),
)) | def stacksize(self):
"""The maximum amount of stack space used by this code object.
"""
return max(scanl(op.add, 0, map(op.attrgetter('stack_effect'), self.instrs))) |
def _get_ref_alt(self, var, boundary):
    """Get reference allele and alternative allele of the variant
    """
    edit = var.posedit.edit
    kind = edit.type
    # Reference allele: insertions and duplications carry no reference
    # bases; otherwise use the edit's explicit reference, falling back to
    # fetching the bounded sequence when none is recorded.
    if kind in ("ins", "dup"):
        ref = ""
    elif edit.ref_s is None or edit.ref == "":
        ref = self._fetch_bounded_seq(var, var.posedit.pos.start.base - 1,
                                      var.posedit.pos.end.base, 0, boundary)
    else:
        ref = edit.ref
    # Alternative allele, selected by edit type.
    if kind in ("sub", "delins", "ins"):
        alt = edit.alt
    elif kind == "del":
        alt = ""
    elif kind == "dup":
        # Only fetch when the edit lacks an explicit reference (lazy `or`).
        alt = edit.ref or self._fetch_bounded_seq(
            var, var.posedit.pos.start.base - 1, var.posedit.pos.end.base, 0, boundary)
    elif kind == "inv":
        alt = reverse_complement(ref)
    elif kind == "identity":
        alt = ref
    return ref, alt
constant[Get reference allele and alternative allele of the variant
]
if <ast.BoolOp object at 0x7da207f993f0> begin[:]
variable[ref] assign[=] constant[]
if <ast.BoolOp object at 0x7da207f9b700> begin[:]
variable[alt] assign[=] name[var].posedit.edit.alt
return[tuple[[<ast.Name object at 0x7da207f99a80>, <ast.Name object at 0x7da207f9bac0>]]] | keyword[def] identifier[_get_ref_alt] ( identifier[self] , identifier[var] , identifier[boundary] ):
literal[string]
keyword[if] identifier[var] . identifier[posedit] . identifier[edit] . identifier[type] == literal[string] keyword[or] identifier[var] . identifier[posedit] . identifier[edit] . identifier[type] == literal[string] :
identifier[ref] = literal[string]
keyword[else] :
keyword[if] identifier[var] . identifier[posedit] . identifier[edit] . identifier[ref_s] keyword[is] keyword[None] keyword[or] identifier[var] . identifier[posedit] . identifier[edit] . identifier[ref] == literal[string] :
identifier[ref] = identifier[self] . identifier[_fetch_bounded_seq] ( identifier[var] , identifier[var] . identifier[posedit] . identifier[pos] . identifier[start] . identifier[base] - literal[int] ,
identifier[var] . identifier[posedit] . identifier[pos] . identifier[end] . identifier[base] , literal[int] , identifier[boundary] )
keyword[else] :
identifier[ref] = identifier[var] . identifier[posedit] . identifier[edit] . identifier[ref]
keyword[if] identifier[var] . identifier[posedit] . identifier[edit] . identifier[type] == literal[string] keyword[or] identifier[var] . identifier[posedit] . identifier[edit] . identifier[type] == literal[string] keyword[or] identifier[var] . identifier[posedit] . identifier[edit] . identifier[type] == literal[string] :
identifier[alt] = identifier[var] . identifier[posedit] . identifier[edit] . identifier[alt]
keyword[elif] identifier[var] . identifier[posedit] . identifier[edit] . identifier[type] == literal[string] :
identifier[alt] = literal[string]
keyword[elif] identifier[var] . identifier[posedit] . identifier[edit] . identifier[type] == literal[string] :
identifier[alt] = identifier[var] . identifier[posedit] . identifier[edit] . identifier[ref] keyword[or] identifier[self] . identifier[_fetch_bounded_seq] (
identifier[var] , identifier[var] . identifier[posedit] . identifier[pos] . identifier[start] . identifier[base] - literal[int] , identifier[var] . identifier[posedit] . identifier[pos] . identifier[end] . identifier[base] , literal[int] , identifier[boundary] )
keyword[elif] identifier[var] . identifier[posedit] . identifier[edit] . identifier[type] == literal[string] :
identifier[alt] = identifier[reverse_complement] ( identifier[ref] )
keyword[elif] identifier[var] . identifier[posedit] . identifier[edit] . identifier[type] == literal[string] :
identifier[alt] = identifier[ref]
keyword[return] identifier[ref] , identifier[alt] | def _get_ref_alt(self, var, boundary):
"""Get reference allele and alternative allele of the variant
"""
# Get reference allele
if var.posedit.edit.type == 'ins' or var.posedit.edit.type == 'dup':
ref = '' # depends on [control=['if'], data=[]]
# For NARefAlt and Inv
elif var.posedit.edit.ref_s is None or var.posedit.edit.ref == '':
ref = self._fetch_bounded_seq(var, var.posedit.pos.start.base - 1, var.posedit.pos.end.base, 0, boundary) # depends on [control=['if'], data=[]]
else:
ref = var.posedit.edit.ref
# Get alternative allele
if var.posedit.edit.type == 'sub' or var.posedit.edit.type == 'delins' or var.posedit.edit.type == 'ins':
alt = var.posedit.edit.alt # depends on [control=['if'], data=[]]
elif var.posedit.edit.type == 'del':
alt = '' # depends on [control=['if'], data=[]]
elif var.posedit.edit.type == 'dup':
alt = var.posedit.edit.ref or self._fetch_bounded_seq(var, var.posedit.pos.start.base - 1, var.posedit.pos.end.base, 0, boundary) # depends on [control=['if'], data=[]]
elif var.posedit.edit.type == 'inv':
alt = reverse_complement(ref) # depends on [control=['if'], data=[]]
elif var.posedit.edit.type == 'identity':
alt = ref # depends on [control=['if'], data=[]]
return (ref, alt) |
def __create_author_investigator_str(self):
    """
    When investigators is empty, try to get authors from the first publication instead.
    :return str author: Author names
    """
    _author = ""
    try:
        # Scan publications in order; the first string-typed author field
        # encountered ends the scan (see the `break` below), while
        # list-typed author fields accumulate across publications.
        for pub in self.noaa_data_sorted["Publication"]:
            if "author" in pub:
                if pub["author"]:
                    _author_src = pub["author"]
                    if isinstance(_author_src, str):
                        try:
                            if " and " in _author_src:
                                _author = _author_src.replace(" and ", "; ")
                            elif ";" in _author_src:
                                # If there is a semi-colon, add a space after it, just in case it didn't have one
                                _author = _author_src.replace(";", "; ")
                            # NOTE(review): this break also fires when neither
                            # separator matched, so a single-author string
                            # leaves _author as "" — confirm that is intended.
                            break
                        except Exception as e:
                            # Keep scanning later publications on failure.
                            _author = ""
                    elif isinstance(_author_src, list):
                        try:
                            # Collect surnames ("Last, First" -> "Last, ").
                            # NOTE(review): no break here, so list-typed
                            # authors from multiple publications concatenate
                            # and a trailing ", " remains — confirm intended.
                            for _entry in _author_src:
                                _author += _entry["name"].split(",")[0] + ", "
                        except Exception as e:
                            _author = ""
    except Exception:
        # Missing "Publication" key or malformed data: fall back to empty.
        _author = ""
    return _author
constant[
When investigators is empty, try to get authors from the first publication instead.
:return str author: Author names
]
variable[_author] assign[=] constant[]
<ast.Try object at 0x7da18f723640>
return[name[_author]] | keyword[def] identifier[__create_author_investigator_str] ( identifier[self] ):
literal[string]
identifier[_author] = literal[string]
keyword[try] :
keyword[for] identifier[pub] keyword[in] identifier[self] . identifier[noaa_data_sorted] [ literal[string] ]:
keyword[if] literal[string] keyword[in] identifier[pub] :
keyword[if] identifier[pub] [ literal[string] ]:
identifier[_author_src] = identifier[pub] [ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[_author_src] , identifier[str] ):
keyword[try] :
keyword[if] literal[string] keyword[in] identifier[_author_src] :
identifier[_author] = identifier[_author_src] . identifier[replace] ( literal[string] , literal[string] )
keyword[elif] literal[string] keyword[in] identifier[_author_src] :
identifier[_author] = identifier[_author_src] . identifier[replace] ( literal[string] , literal[string] )
keyword[break]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[_author] = literal[string]
keyword[elif] identifier[isinstance] ( identifier[_author_src] , identifier[list] ):
keyword[try] :
keyword[for] identifier[_entry] keyword[in] identifier[_author_src] :
identifier[_author] += identifier[_entry] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ]+ literal[string]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[_author] = literal[string]
keyword[except] identifier[Exception] :
identifier[_author] = literal[string]
keyword[return] identifier[_author] | def __create_author_investigator_str(self):
"""
When investigators is empty, try to get authors from the first publication instead.
:return str author: Author names
"""
_author = ''
try:
for pub in self.noaa_data_sorted['Publication']:
if 'author' in pub:
if pub['author']:
_author_src = pub['author']
if isinstance(_author_src, str):
try:
if ' and ' in _author_src:
_author = _author_src.replace(' and ', '; ') # depends on [control=['if'], data=['_author_src']]
elif ';' in _author_src:
# If there is a semi-colon, add a space after it, just in case it didn't have one
_author = _author_src.replace(';', '; ') # depends on [control=['if'], data=['_author_src']]
break # depends on [control=['try'], data=[]]
except Exception as e:
_author = '' # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(_author_src, list):
try:
for _entry in _author_src:
_author += _entry['name'].split(',')[0] + ', ' # depends on [control=['for'], data=['_entry']] # depends on [control=['try'], data=[]]
except Exception as e:
_author = '' # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['pub']] # depends on [control=['for'], data=['pub']] # depends on [control=['try'], data=[]]
except Exception:
_author = '' # depends on [control=['except'], data=[]]
return _author |
def dump(self, filename=None, compressed=True, pretty=True):
    '''dump device window and pull to local file.'''
    # Fetch the raw window hierarchy XML from the device-side server.
    content = self.server.jsonrpc.dumpWindowHierarchy(compressed, None)
    if filename:
        # Persist the raw (pre-prettified) hierarchy to disk.
        with open(filename, "wb") as out:
            out.write(content.encode("utf-8"))
    # Reformat only when the payload does not already look indented.
    if pretty and "\n " not in content:
        dom = xml.dom.minidom.parseString(content.encode("utf-8"))
        content = U(dom.toprettyxml(indent='  '))
    return content
constant[dump device window and pull to local file.]
variable[content] assign[=] call[name[self].server.jsonrpc.dumpWindowHierarchy, parameter[name[compressed], constant[None]]]
if name[filename] begin[:]
with call[name[open], parameter[name[filename], constant[wb]]] begin[:]
call[name[f].write, parameter[call[name[content].encode, parameter[constant[utf-8]]]]]
if <ast.BoolOp object at 0x7da2054a7550> begin[:]
variable[xml_text] assign[=] call[name[xml].dom.minidom.parseString, parameter[call[name[content].encode, parameter[constant[utf-8]]]]]
variable[content] assign[=] call[name[U], parameter[call[name[xml_text].toprettyxml, parameter[]]]]
return[name[content]] | keyword[def] identifier[dump] ( identifier[self] , identifier[filename] = keyword[None] , identifier[compressed] = keyword[True] , identifier[pretty] = keyword[True] ):
literal[string]
identifier[content] = identifier[self] . identifier[server] . identifier[jsonrpc] . identifier[dumpWindowHierarchy] ( identifier[compressed] , keyword[None] )
keyword[if] identifier[filename] :
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[content] . identifier[encode] ( literal[string] ))
keyword[if] identifier[pretty] keyword[and] literal[string] keyword[not] keyword[in] identifier[content] :
identifier[xml_text] = identifier[xml] . identifier[dom] . identifier[minidom] . identifier[parseString] ( identifier[content] . identifier[encode] ( literal[string] ))
identifier[content] = identifier[U] ( identifier[xml_text] . identifier[toprettyxml] ( identifier[indent] = literal[string] ))
keyword[return] identifier[content] | def dump(self, filename=None, compressed=True, pretty=True):
"""dump device window and pull to local file."""
content = self.server.jsonrpc.dumpWindowHierarchy(compressed, None)
if filename:
with open(filename, 'wb') as f:
f.write(content.encode('utf-8')) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
if pretty and '\n ' not in content:
xml_text = xml.dom.minidom.parseString(content.encode('utf-8'))
content = U(xml_text.toprettyxml(indent=' ')) # depends on [control=['if'], data=[]]
return content |
def run(self):
    """Build the Fortran library, all python extensions and the docs."""
    print('---- CUSTOM DEVELOP ----')
    # Run the stock develop command first (builds extensions in place).
    _develop.run(self)
    # Then build the documentation into pyshtools/doc.
    print('---- BUILDING DOCS ----')
    doc_dir = os.path.join(self.setup_path, 'pyshtools', 'doc')
    self.mkpath(doc_dir)
    builder = os.path.join(self.setup_path, 'pyshtools', 'make_docs.py')
    # Invoke the doc builder with the current interpreter; '.' is the
    # documentation source directory.
    check_call([sys.executable, builder, '.', self.setup_path])
    print('---- ALL DONE ----')
constant[Build the Fortran library, all python extensions and the docs.]
call[name[print], parameter[constant[---- CUSTOM DEVELOP ----]]]
call[name[_develop].run, parameter[name[self]]]
call[name[print], parameter[constant[---- BUILDING DOCS ----]]]
variable[docdir] assign[=] call[name[os].path.join, parameter[name[self].setup_path, constant[pyshtools], constant[doc]]]
call[name[self].mkpath, parameter[name[docdir]]]
variable[doc_builder] assign[=] call[name[os].path.join, parameter[name[self].setup_path, constant[pyshtools], constant[make_docs.py]]]
variable[doc_source] assign[=] constant[.]
call[name[check_call], parameter[list[[<ast.Attribute object at 0x7da18bccb310>, <ast.Name object at 0x7da18bccb220>, <ast.Name object at 0x7da18bcc8070>, <ast.Attribute object at 0x7da18bccad40>]]]]
call[name[print], parameter[constant[---- ALL DONE ----]]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[print] ( literal[string] )
identifier[_develop] . identifier[run] ( identifier[self] )
identifier[print] ( literal[string] )
identifier[docdir] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[setup_path] , literal[string] , literal[string] )
identifier[self] . identifier[mkpath] ( identifier[docdir] )
identifier[doc_builder] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[setup_path] , literal[string] ,
literal[string] )
identifier[doc_source] = literal[string]
identifier[check_call] ([ identifier[sys] . identifier[executable] , identifier[doc_builder] , identifier[doc_source] , identifier[self] . identifier[setup_path] ])
identifier[print] ( literal[string] ) | def run(self):
"""Build the Fortran library, all python extensions and the docs."""
print('---- CUSTOM DEVELOP ----')
_develop.run(self)
# build documentation
print('---- BUILDING DOCS ----')
docdir = os.path.join(self.setup_path, 'pyshtools', 'doc')
self.mkpath(docdir)
doc_builder = os.path.join(self.setup_path, 'pyshtools', 'make_docs.py')
doc_source = '.'
check_call([sys.executable, doc_builder, doc_source, self.setup_path])
print('---- ALL DONE ----') |
def compile_all(self):
    """Compiles all of the contracts in the self.contracts_dir directory
    Creates {contract name}.json files in self.output_dir that contain
    the build output for each contract.
    """
    # Build the standard-JSON compiler input and compile everything,
    # allowing imports from the (resolved) contracts directory.
    compiler_input = self.get_solc_input()
    source_root = os.path.realpath(self.contracts_dir)
    result = compile_standard(compiler_input, allow_paths=source_root)
    # Make sure the destination folder exists.
    os.makedirs(self.output_dir, exist_ok=True)
    # Dump each contract's build output to <output_dir>/<name>.json.
    for source_file, contracts in result['contracts'].items():
        for qualified_name in contracts:
            short_name = qualified_name.split('.')[0]
            # NOTE(review): lookup uses the dot-truncated name; if a contract
            # key ever contains '.', this raises KeyError — confirm intended.
            build_output = contracts[short_name]
            out_path = '{0}/{1}.json'.format(self.output_dir, short_name)
            with open(out_path, "w+") as out_file:
                json.dump(build_output, out_file)
constant[Compiles all of the contracts in the self.contracts_dir directory
Creates {contract name}.json files in self.output_dir that contain
the build output for each contract.
]
variable[solc_input] assign[=] call[name[self].get_solc_input, parameter[]]
variable[real_path] assign[=] call[name[os].path.realpath, parameter[name[self].contracts_dir]]
variable[compilation_result] assign[=] call[name[compile_standard], parameter[name[solc_input]]]
call[name[os].makedirs, parameter[name[self].output_dir]]
variable[compiled_contracts] assign[=] call[name[compilation_result]][constant[contracts]]
for taget[name[contract_file]] in starred[name[compiled_contracts]] begin[:]
for taget[name[contract]] in starred[call[name[compiled_contracts]][name[contract_file]]] begin[:]
variable[contract_name] assign[=] call[call[name[contract].split, parameter[constant[.]]]][constant[0]]
variable[contract_data] assign[=] call[call[name[compiled_contracts]][name[contract_file]]][name[contract_name]]
variable[contract_data_path] assign[=] binary_operation[name[self].output_dir + call[constant[/{0}.json].format, parameter[name[contract_name]]]]
with call[name[open], parameter[name[contract_data_path], constant[w+]]] begin[:]
call[name[json].dump, parameter[name[contract_data], name[contract_data_file]]] | keyword[def] identifier[compile_all] ( identifier[self] ):
literal[string]
identifier[solc_input] = identifier[self] . identifier[get_solc_input] ()
identifier[real_path] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[self] . identifier[contracts_dir] )
identifier[compilation_result] = identifier[compile_standard] ( identifier[solc_input] , identifier[allow_paths] = identifier[real_path] )
identifier[os] . identifier[makedirs] ( identifier[self] . identifier[output_dir] , identifier[exist_ok] = keyword[True] )
identifier[compiled_contracts] = identifier[compilation_result] [ literal[string] ]
keyword[for] identifier[contract_file] keyword[in] identifier[compiled_contracts] :
keyword[for] identifier[contract] keyword[in] identifier[compiled_contracts] [ identifier[contract_file] ]:
identifier[contract_name] = identifier[contract] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[contract_data] = identifier[compiled_contracts] [ identifier[contract_file] ][ identifier[contract_name] ]
identifier[contract_data_path] = identifier[self] . identifier[output_dir] + literal[string] . identifier[format] ( identifier[contract_name] )
keyword[with] identifier[open] ( identifier[contract_data_path] , literal[string] ) keyword[as] identifier[contract_data_file] :
identifier[json] . identifier[dump] ( identifier[contract_data] , identifier[contract_data_file] ) | def compile_all(self):
"""Compiles all of the contracts in the self.contracts_dir directory
Creates {contract name}.json files in self.output_dir that contain
the build output for each contract.
"""
# Solidity input JSON
solc_input = self.get_solc_input()
# Compile the contracts
real_path = os.path.realpath(self.contracts_dir)
compilation_result = compile_standard(solc_input, allow_paths=real_path)
# Create the output folder if it doesn't already exist
os.makedirs(self.output_dir, exist_ok=True)
# Write the contract ABI to output files
compiled_contracts = compilation_result['contracts']
for contract_file in compiled_contracts:
for contract in compiled_contracts[contract_file]:
contract_name = contract.split('.')[0]
contract_data = compiled_contracts[contract_file][contract_name]
contract_data_path = self.output_dir + '/{0}.json'.format(contract_name)
with open(contract_data_path, 'w+') as contract_data_file:
json.dump(contract_data, contract_data_file) # depends on [control=['with'], data=['contract_data_file']] # depends on [control=['for'], data=['contract']] # depends on [control=['for'], data=['contract_file']] |
def update_customer(self, customer_id, customer_deets):
    """Updates an existing customer."""
    # PUT the new details to the customer-specific endpoint and hand the
    # raw response to the shared responder for unwrapping.
    endpoint = "customers/{0}".format(customer_id)
    return self.responder(self._put(endpoint, customer_deets))
constant[Updates an existing customer.]
variable[request] assign[=] call[name[self]._put, parameter[binary_operation[constant[customers/] + call[name[str], parameter[name[customer_id]]]], name[customer_deets]]]
return[call[name[self].responder, parameter[name[request]]]] | keyword[def] identifier[update_customer] ( identifier[self] , identifier[customer_id] , identifier[customer_deets] ):
literal[string]
identifier[request] = identifier[self] . identifier[_put] ( literal[string] + identifier[str] ( identifier[customer_id] ), identifier[customer_deets] )
keyword[return] identifier[self] . identifier[responder] ( identifier[request] ) | def update_customer(self, customer_id, customer_deets):
"""Updates an existing customer."""
request = self._put('customers/' + str(customer_id), customer_deets)
return self.responder(request) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.