code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def add_transition(self, output,
probability_func=lambda index: np.ones(len(index), dtype=float),
triggered=Trigger.NOT_TRIGGERED):
"""Builds a transition from this state to the given state.
output : State
The end state after the transition.
"""
t = Transition(self, output, probability_func=probability_func, triggered=triggered)
self.transition_set.append(t)
return t | def function[add_transition, parameter[self, output, probability_func, triggered]]:
constant[Builds a transition from this state to the given state.
output : State
The end state after the transition.
]
variable[t] assign[=] call[name[Transition], parameter[name[self], name[output]]]
call[name[self].transition_set.append, parameter[name[t]]]
return[name[t]] | keyword[def] identifier[add_transition] ( identifier[self] , identifier[output] ,
identifier[probability_func] = keyword[lambda] identifier[index] : identifier[np] . identifier[ones] ( identifier[len] ( identifier[index] ), identifier[dtype] = identifier[float] ),
identifier[triggered] = identifier[Trigger] . identifier[NOT_TRIGGERED] ):
literal[string]
identifier[t] = identifier[Transition] ( identifier[self] , identifier[output] , identifier[probability_func] = identifier[probability_func] , identifier[triggered] = identifier[triggered] )
identifier[self] . identifier[transition_set] . identifier[append] ( identifier[t] )
keyword[return] identifier[t] | def add_transition(self, output, probability_func=lambda index: np.ones(len(index), dtype=float), triggered=Trigger.NOT_TRIGGERED):
"""Builds a transition from this state to the given state.
output : State
The end state after the transition.
"""
t = Transition(self, output, probability_func=probability_func, triggered=triggered)
self.transition_set.append(t)
return t |
def post_message(self, channel_id, message, **kwargs):
"""
Send a message using the slack Event API.
Event messages should be used for more complex messages. See
https://api.slack.com/methods/chat.postMessage for details on arguments can be included
with your message.
When using the post_message API, to have your message look like it's sent from your bot
you'll need to include the `as_user` kwarg. Example of how to do this:
server.slack.post_message(msg['channel'], 'My message', as_user=server.slack.username)
"""
params = {
"post_data": {
"text": message,
"channel": channel_id,
}
}
params["post_data"].update(kwargs)
return self.api_call("chat.postMessage", **params) | def function[post_message, parameter[self, channel_id, message]]:
constant[
Send a message using the slack Event API.
Event messages should be used for more complex messages. See
https://api.slack.com/methods/chat.postMessage for details on arguments can be included
with your message.
When using the post_message API, to have your message look like it's sent from your bot
you'll need to include the `as_user` kwarg. Example of how to do this:
server.slack.post_message(msg['channel'], 'My message', as_user=server.slack.username)
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b104a5f0>], [<ast.Dict object at 0x7da1b104aef0>]]
call[call[name[params]][constant[post_data]].update, parameter[name[kwargs]]]
return[call[name[self].api_call, parameter[constant[chat.postMessage]]]] | keyword[def] identifier[post_message] ( identifier[self] , identifier[channel_id] , identifier[message] ,** identifier[kwargs] ):
literal[string]
identifier[params] ={
literal[string] :{
literal[string] : identifier[message] ,
literal[string] : identifier[channel_id] ,
}
}
identifier[params] [ literal[string] ]. identifier[update] ( identifier[kwargs] )
keyword[return] identifier[self] . identifier[api_call] ( literal[string] ,** identifier[params] ) | def post_message(self, channel_id, message, **kwargs):
"""
Send a message using the slack Event API.
Event messages should be used for more complex messages. See
https://api.slack.com/methods/chat.postMessage for details on arguments can be included
with your message.
When using the post_message API, to have your message look like it's sent from your bot
you'll need to include the `as_user` kwarg. Example of how to do this:
server.slack.post_message(msg['channel'], 'My message', as_user=server.slack.username)
"""
params = {'post_data': {'text': message, 'channel': channel_id}}
params['post_data'].update(kwargs)
return self.api_call('chat.postMessage', **params) |
def interp(self, new_timestamps, interpolation_mode=0):
""" returns a new *Signal* interpolated using the *new_timestamps*
Parameters
----------
new_timestamps : np.array
timestamps used for interpolation
interpolation_mode : int
interpolation mode for integer signals; default 0
* 0 - repeat previous samples
* 1 - linear interpolation
Returns
-------
signal : Signal
new interpolated *Signal*
"""
if not len(self.samples) or not len(new_timestamps):
return Signal(
self.samples.copy(),
self.timestamps.copy(),
self.unit,
self.name,
comment=self.comment,
conversion=self.conversion,
raw=self.raw,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=self.invalidation_bits.copy()
if self.invalidation_bits is not None
else None,
encoding=self.encoding,
)
else:
if len(self.samples.shape) > 1:
idx = np.searchsorted(self.timestamps, new_timestamps, side="right")
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
kind = self.samples.dtype.kind
if kind == "f":
s = np.interp(new_timestamps, self.timestamps, self.samples)
if self.invalidation_bits is not None:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
elif kind in "ui":
if interpolation_mode == 1:
s = np.interp(
new_timestamps, self.timestamps, self.samples
).astype(self.samples.dtype)
if self.invalidation_bits is not None:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
idx = np.searchsorted(self.timestamps, new_timestamps, side="right")
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
return Signal(
s,
new_timestamps,
self.unit,
self.name,
comment=self.comment,
conversion=self.conversion,
source=self.source,
raw=self.raw,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=invalidation_bits,
encoding=self.encoding,
) | def function[interp, parameter[self, new_timestamps, interpolation_mode]]:
constant[ returns a new *Signal* interpolated using the *new_timestamps*
Parameters
----------
new_timestamps : np.array
timestamps used for interpolation
interpolation_mode : int
interpolation mode for integer signals; default 0
* 0 - repeat previous samples
* 1 - linear interpolation
Returns
-------
signal : Signal
new interpolated *Signal*
]
if <ast.BoolOp object at 0x7da1b18c1570> begin[:]
return[call[name[Signal], parameter[call[name[self].samples.copy, parameter[]], call[name[self].timestamps.copy, parameter[]], name[self].unit, name[self].name]]] | keyword[def] identifier[interp] ( identifier[self] , identifier[new_timestamps] , identifier[interpolation_mode] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[len] ( identifier[self] . identifier[samples] ) keyword[or] keyword[not] identifier[len] ( identifier[new_timestamps] ):
keyword[return] identifier[Signal] (
identifier[self] . identifier[samples] . identifier[copy] (),
identifier[self] . identifier[timestamps] . identifier[copy] (),
identifier[self] . identifier[unit] ,
identifier[self] . identifier[name] ,
identifier[comment] = identifier[self] . identifier[comment] ,
identifier[conversion] = identifier[self] . identifier[conversion] ,
identifier[raw] = identifier[self] . identifier[raw] ,
identifier[master_metadata] = identifier[self] . identifier[master_metadata] ,
identifier[display_name] = identifier[self] . identifier[display_name] ,
identifier[attachment] = identifier[self] . identifier[attachment] ,
identifier[stream_sync] = identifier[self] . identifier[stream_sync] ,
identifier[invalidation_bits] = identifier[self] . identifier[invalidation_bits] . identifier[copy] ()
keyword[if] identifier[self] . identifier[invalidation_bits] keyword[is] keyword[not] keyword[None]
keyword[else] keyword[None] ,
identifier[encoding] = identifier[self] . identifier[encoding] ,
)
keyword[else] :
keyword[if] identifier[len] ( identifier[self] . identifier[samples] . identifier[shape] )> literal[int] :
identifier[idx] = identifier[np] . identifier[searchsorted] ( identifier[self] . identifier[timestamps] , identifier[new_timestamps] , identifier[side] = literal[string] )
identifier[idx] -= literal[int]
identifier[idx] = identifier[np] . identifier[clip] ( identifier[idx] , literal[int] , identifier[idx] [- literal[int] ])
identifier[s] = identifier[self] . identifier[samples] [ identifier[idx] ]
keyword[if] identifier[self] . identifier[invalidation_bits] keyword[is] keyword[not] keyword[None] :
identifier[invalidation_bits] = identifier[self] . identifier[invalidation_bits] [ identifier[idx] ]
keyword[else] :
identifier[invalidation_bits] = keyword[None]
keyword[else] :
identifier[kind] = identifier[self] . identifier[samples] . identifier[dtype] . identifier[kind]
keyword[if] identifier[kind] == literal[string] :
identifier[s] = identifier[np] . identifier[interp] ( identifier[new_timestamps] , identifier[self] . identifier[timestamps] , identifier[self] . identifier[samples] )
keyword[if] identifier[self] . identifier[invalidation_bits] keyword[is] keyword[not] keyword[None] :
identifier[idx] = identifier[np] . identifier[searchsorted] (
identifier[self] . identifier[timestamps] , identifier[new_timestamps] , identifier[side] = literal[string]
)
identifier[idx] -= literal[int]
identifier[idx] = identifier[np] . identifier[clip] ( identifier[idx] , literal[int] , identifier[idx] [- literal[int] ])
identifier[invalidation_bits] = identifier[self] . identifier[invalidation_bits] [ identifier[idx] ]
keyword[else] :
identifier[invalidation_bits] = keyword[None]
keyword[elif] identifier[kind] keyword[in] literal[string] :
keyword[if] identifier[interpolation_mode] == literal[int] :
identifier[s] = identifier[np] . identifier[interp] (
identifier[new_timestamps] , identifier[self] . identifier[timestamps] , identifier[self] . identifier[samples]
). identifier[astype] ( identifier[self] . identifier[samples] . identifier[dtype] )
keyword[if] identifier[self] . identifier[invalidation_bits] keyword[is] keyword[not] keyword[None] :
identifier[idx] = identifier[np] . identifier[searchsorted] (
identifier[self] . identifier[timestamps] , identifier[new_timestamps] , identifier[side] = literal[string]
)
identifier[idx] -= literal[int]
identifier[idx] = identifier[np] . identifier[clip] ( identifier[idx] , literal[int] , identifier[idx] [- literal[int] ])
identifier[invalidation_bits] = identifier[self] . identifier[invalidation_bits] [ identifier[idx] ]
keyword[else] :
identifier[invalidation_bits] = keyword[None]
keyword[else] :
identifier[idx] = identifier[np] . identifier[searchsorted] (
identifier[self] . identifier[timestamps] , identifier[new_timestamps] , identifier[side] = literal[string]
)
identifier[idx] -= literal[int]
identifier[idx] = identifier[np] . identifier[clip] ( identifier[idx] , literal[int] , identifier[idx] [- literal[int] ])
identifier[s] = identifier[self] . identifier[samples] [ identifier[idx] ]
keyword[if] identifier[self] . identifier[invalidation_bits] keyword[is] keyword[not] keyword[None] :
identifier[invalidation_bits] = identifier[self] . identifier[invalidation_bits] [ identifier[idx] ]
keyword[else] :
identifier[invalidation_bits] = keyword[None]
keyword[else] :
identifier[idx] = identifier[np] . identifier[searchsorted] ( identifier[self] . identifier[timestamps] , identifier[new_timestamps] , identifier[side] = literal[string] )
identifier[idx] -= literal[int]
identifier[idx] = identifier[np] . identifier[clip] ( identifier[idx] , literal[int] , identifier[idx] [- literal[int] ])
identifier[s] = identifier[self] . identifier[samples] [ identifier[idx] ]
keyword[if] identifier[self] . identifier[invalidation_bits] keyword[is] keyword[not] keyword[None] :
identifier[invalidation_bits] = identifier[self] . identifier[invalidation_bits] [ identifier[idx] ]
keyword[else] :
identifier[invalidation_bits] = keyword[None]
keyword[return] identifier[Signal] (
identifier[s] ,
identifier[new_timestamps] ,
identifier[self] . identifier[unit] ,
identifier[self] . identifier[name] ,
identifier[comment] = identifier[self] . identifier[comment] ,
identifier[conversion] = identifier[self] . identifier[conversion] ,
identifier[source] = identifier[self] . identifier[source] ,
identifier[raw] = identifier[self] . identifier[raw] ,
identifier[master_metadata] = identifier[self] . identifier[master_metadata] ,
identifier[display_name] = identifier[self] . identifier[display_name] ,
identifier[attachment] = identifier[self] . identifier[attachment] ,
identifier[stream_sync] = identifier[self] . identifier[stream_sync] ,
identifier[invalidation_bits] = identifier[invalidation_bits] ,
identifier[encoding] = identifier[self] . identifier[encoding] ,
) | def interp(self, new_timestamps, interpolation_mode=0):
""" returns a new *Signal* interpolated using the *new_timestamps*
Parameters
----------
new_timestamps : np.array
timestamps used for interpolation
interpolation_mode : int
interpolation mode for integer signals; default 0
* 0 - repeat previous samples
* 1 - linear interpolation
Returns
-------
signal : Signal
new interpolated *Signal*
"""
if not len(self.samples) or not len(new_timestamps):
return Signal(self.samples.copy(), self.timestamps.copy(), self.unit, self.name, comment=self.comment, conversion=self.conversion, raw=self.raw, master_metadata=self.master_metadata, display_name=self.display_name, attachment=self.attachment, stream_sync=self.stream_sync, invalidation_bits=self.invalidation_bits.copy() if self.invalidation_bits is not None else None, encoding=self.encoding) # depends on [control=['if'], data=[]]
else:
if len(self.samples.shape) > 1:
idx = np.searchsorted(self.timestamps, new_timestamps, side='right')
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx] # depends on [control=['if'], data=[]]
else:
invalidation_bits = None # depends on [control=['if'], data=[]]
else:
kind = self.samples.dtype.kind
if kind == 'f':
s = np.interp(new_timestamps, self.timestamps, self.samples)
if self.invalidation_bits is not None:
idx = np.searchsorted(self.timestamps, new_timestamps, side='right')
idx -= 1
idx = np.clip(idx, 0, idx[-1])
invalidation_bits = self.invalidation_bits[idx] # depends on [control=['if'], data=[]]
else:
invalidation_bits = None # depends on [control=['if'], data=[]]
elif kind in 'ui':
if interpolation_mode == 1:
s = np.interp(new_timestamps, self.timestamps, self.samples).astype(self.samples.dtype)
if self.invalidation_bits is not None:
idx = np.searchsorted(self.timestamps, new_timestamps, side='right')
idx -= 1
idx = np.clip(idx, 0, idx[-1])
invalidation_bits = self.invalidation_bits[idx] # depends on [control=['if'], data=[]]
else:
invalidation_bits = None # depends on [control=['if'], data=[]]
else:
idx = np.searchsorted(self.timestamps, new_timestamps, side='right')
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx] # depends on [control=['if'], data=[]]
else:
invalidation_bits = None # depends on [control=['if'], data=[]]
else:
idx = np.searchsorted(self.timestamps, new_timestamps, side='right')
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx] # depends on [control=['if'], data=[]]
else:
invalidation_bits = None
return Signal(s, new_timestamps, self.unit, self.name, comment=self.comment, conversion=self.conversion, source=self.source, raw=self.raw, master_metadata=self.master_metadata, display_name=self.display_name, attachment=self.attachment, stream_sync=self.stream_sync, invalidation_bits=invalidation_bits, encoding=self.encoding) |
def _adapt_param(self, key, val):
"""
Adapt the value if an adapter is defined.
"""
if key in self.param_adapters:
try:
return self.param_adapters[key](val)
except (AdaptError, AdaptErrors, TypeError, ValueError) as e:
if hasattr(e, 'errors'):
errors = e.errors
else:
errors = [e]
raise AnticipateParamError(
message='Input value %r for parameter `%s` does not match '
'anticipated type %r' % (type(val), key, self.params[key]),
name=key,
value=val,
anticipated=self.params[key],
errors=errors)
else:
return val | def function[_adapt_param, parameter[self, key, val]]:
constant[
Adapt the value if an adapter is defined.
]
if compare[name[key] in name[self].param_adapters] begin[:]
<ast.Try object at 0x7da18fe90f10> | keyword[def] identifier[_adapt_param] ( identifier[self] , identifier[key] , identifier[val] ):
literal[string]
keyword[if] identifier[key] keyword[in] identifier[self] . identifier[param_adapters] :
keyword[try] :
keyword[return] identifier[self] . identifier[param_adapters] [ identifier[key] ]( identifier[val] )
keyword[except] ( identifier[AdaptError] , identifier[AdaptErrors] , identifier[TypeError] , identifier[ValueError] ) keyword[as] identifier[e] :
keyword[if] identifier[hasattr] ( identifier[e] , literal[string] ):
identifier[errors] = identifier[e] . identifier[errors]
keyword[else] :
identifier[errors] =[ identifier[e] ]
keyword[raise] identifier[AnticipateParamError] (
identifier[message] = literal[string]
literal[string] %( identifier[type] ( identifier[val] ), identifier[key] , identifier[self] . identifier[params] [ identifier[key] ]),
identifier[name] = identifier[key] ,
identifier[value] = identifier[val] ,
identifier[anticipated] = identifier[self] . identifier[params] [ identifier[key] ],
identifier[errors] = identifier[errors] )
keyword[else] :
keyword[return] identifier[val] | def _adapt_param(self, key, val):
"""
Adapt the value if an adapter is defined.
"""
if key in self.param_adapters:
try:
return self.param_adapters[key](val) # depends on [control=['try'], data=[]]
except (AdaptError, AdaptErrors, TypeError, ValueError) as e:
if hasattr(e, 'errors'):
errors = e.errors # depends on [control=['if'], data=[]]
else:
errors = [e]
raise AnticipateParamError(message='Input value %r for parameter `%s` does not match anticipated type %r' % (type(val), key, self.params[key]), name=key, value=val, anticipated=self.params[key], errors=errors) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=['key']]
else:
return val |
def from_json(value, native_datetimes=True):
"""
Deserializes the given value from JSON.
:param value: the value to deserialize
:type value: str
:param native_datetimes:
whether or not strings that look like dates/times should be
automatically cast to the native objects, or left as strings; if not
specified, defaults to ``True``
:type native_datetimes: bool
"""
hook = BasicJsonDecoder(native_datetimes=native_datetimes)
result = json.loads(value, object_hook=hook)
if native_datetimes and isinstance(result, string_types):
return get_date_or_string(result)
return result | def function[from_json, parameter[value, native_datetimes]]:
constant[
Deserializes the given value from JSON.
:param value: the value to deserialize
:type value: str
:param native_datetimes:
whether or not strings that look like dates/times should be
automatically cast to the native objects, or left as strings; if not
specified, defaults to ``True``
:type native_datetimes: bool
]
variable[hook] assign[=] call[name[BasicJsonDecoder], parameter[]]
variable[result] assign[=] call[name[json].loads, parameter[name[value]]]
if <ast.BoolOp object at 0x7da207f035b0> begin[:]
return[call[name[get_date_or_string], parameter[name[result]]]]
return[name[result]] | keyword[def] identifier[from_json] ( identifier[value] , identifier[native_datetimes] = keyword[True] ):
literal[string]
identifier[hook] = identifier[BasicJsonDecoder] ( identifier[native_datetimes] = identifier[native_datetimes] )
identifier[result] = identifier[json] . identifier[loads] ( identifier[value] , identifier[object_hook] = identifier[hook] )
keyword[if] identifier[native_datetimes] keyword[and] identifier[isinstance] ( identifier[result] , identifier[string_types] ):
keyword[return] identifier[get_date_or_string] ( identifier[result] )
keyword[return] identifier[result] | def from_json(value, native_datetimes=True):
"""
Deserializes the given value from JSON.
:param value: the value to deserialize
:type value: str
:param native_datetimes:
whether or not strings that look like dates/times should be
automatically cast to the native objects, or left as strings; if not
specified, defaults to ``True``
:type native_datetimes: bool
"""
hook = BasicJsonDecoder(native_datetimes=native_datetimes)
result = json.loads(value, object_hook=hook)
if native_datetimes and isinstance(result, string_types):
return get_date_or_string(result) # depends on [control=['if'], data=[]]
return result |
def on_clear(self, event):
"""
initialize window to allow user to empty the working directory
"""
dia = pmag_menu_dialogs.ClearWD(self.parent, self.parent.WD)
clear = dia.do_clear()
if clear:
print('-I- Clear data object')
self.parent.er_magic = builder.ErMagicBuilder(self.parent.WD, self.parent.data_model)
print('-I- Initializing headers')
self.parent.er_magic.init_default_headers()
self.parent.er_magic.init_actual_headers() | def function[on_clear, parameter[self, event]]:
constant[
initialize window to allow user to empty the working directory
]
variable[dia] assign[=] call[name[pmag_menu_dialogs].ClearWD, parameter[name[self].parent, name[self].parent.WD]]
variable[clear] assign[=] call[name[dia].do_clear, parameter[]]
if name[clear] begin[:]
call[name[print], parameter[constant[-I- Clear data object]]]
name[self].parent.er_magic assign[=] call[name[builder].ErMagicBuilder, parameter[name[self].parent.WD, name[self].parent.data_model]]
call[name[print], parameter[constant[-I- Initializing headers]]]
call[name[self].parent.er_magic.init_default_headers, parameter[]]
call[name[self].parent.er_magic.init_actual_headers, parameter[]] | keyword[def] identifier[on_clear] ( identifier[self] , identifier[event] ):
literal[string]
identifier[dia] = identifier[pmag_menu_dialogs] . identifier[ClearWD] ( identifier[self] . identifier[parent] , identifier[self] . identifier[parent] . identifier[WD] )
identifier[clear] = identifier[dia] . identifier[do_clear] ()
keyword[if] identifier[clear] :
identifier[print] ( literal[string] )
identifier[self] . identifier[parent] . identifier[er_magic] = identifier[builder] . identifier[ErMagicBuilder] ( identifier[self] . identifier[parent] . identifier[WD] , identifier[self] . identifier[parent] . identifier[data_model] )
identifier[print] ( literal[string] )
identifier[self] . identifier[parent] . identifier[er_magic] . identifier[init_default_headers] ()
identifier[self] . identifier[parent] . identifier[er_magic] . identifier[init_actual_headers] () | def on_clear(self, event):
"""
initialize window to allow user to empty the working directory
"""
dia = pmag_menu_dialogs.ClearWD(self.parent, self.parent.WD)
clear = dia.do_clear()
if clear:
print('-I- Clear data object')
self.parent.er_magic = builder.ErMagicBuilder(self.parent.WD, self.parent.data_model)
print('-I- Initializing headers')
self.parent.er_magic.init_default_headers()
self.parent.er_magic.init_actual_headers() # depends on [control=['if'], data=[]] |
def rebuild_collection_tree(old_collection_ident, new_document_id_map, plpy):
"""Create a new tree for the collection based on the old tree.
This uses new document ids, replacing old ones.
"""
get_tree = plpy.prepare('''
WITH RECURSIVE t(node, parent, document, title, childorder, latest, path)
AS (SELECT tr.nodeid, tr.parent_id, tr.documentid, tr.title,
tr.childorder, tr.latest, ARRAY[tr.nodeid]
FROM trees tr
WHERE tr.documentid = $1 AND tr.is_collated = 'False'
UNION ALL
SELECT c.nodeid, c.parent_id, c.documentid, c.title,
c.childorder, c.latest, path || ARRAY[c.nodeid]
FROM trees c JOIN t ON (c.parent_id = t.node)
WHERE not c.nodeid = ANY(t.path)
)
SELECT * FROM t
''', ('integer',))
def get_old_tree():
return plpy.execute(get_tree, (old_collection_ident,))
tree = {} # { old_nodeid: {'data': ...}, ...}
children = {} # { nodeid: [child_nodeid, ...], child_nodeid: [...]}
for i in get_old_tree():
tree[i['node']] = {'data': i, 'new_nodeid': None}
children.setdefault(i['parent'], [])
children[i['parent']].append(i['node'])
insert_tree = plpy.prepare('''
INSERT INTO trees (nodeid, parent_id, documentid,
title, childorder, latest)
VALUES (DEFAULT, $1, $2, $3, $4, $5)
RETURNING nodeid
''', ('integer', 'integer', 'text', 'integer', 'boolean'))
def execute(fields):
results = plpy.execute(insert_tree, fields, 1)
return results[0]['nodeid']
root_node = children[None][0]
def build_tree(node, parent):
data = tree[node]['data']
new_node = execute([parent, new_document_id_map.get(data['document'],
data['document']), data['title'],
data['childorder'], data['latest']])
for i in children.get(node, []):
build_tree(i, new_node)
build_tree(root_node, None) | def function[rebuild_collection_tree, parameter[old_collection_ident, new_document_id_map, plpy]]:
constant[Create a new tree for the collection based on the old tree.
This uses new document ids, replacing old ones.
]
variable[get_tree] assign[=] call[name[plpy].prepare, parameter[constant[
WITH RECURSIVE t(node, parent, document, title, childorder, latest, path)
AS (SELECT tr.nodeid, tr.parent_id, tr.documentid, tr.title,
tr.childorder, tr.latest, ARRAY[tr.nodeid]
FROM trees tr
WHERE tr.documentid = $1 AND tr.is_collated = 'False'
UNION ALL
SELECT c.nodeid, c.parent_id, c.documentid, c.title,
c.childorder, c.latest, path || ARRAY[c.nodeid]
FROM trees c JOIN t ON (c.parent_id = t.node)
WHERE not c.nodeid = ANY(t.path)
)
SELECT * FROM t
], tuple[[<ast.Constant object at 0x7da1b1932a10>]]]]
def function[get_old_tree, parameter[]]:
return[call[name[plpy].execute, parameter[name[get_tree], tuple[[<ast.Name object at 0x7da1b1931090>]]]]]
variable[tree] assign[=] dictionary[[], []]
variable[children] assign[=] dictionary[[], []]
for taget[name[i]] in starred[call[name[get_old_tree], parameter[]]] begin[:]
call[name[tree]][call[name[i]][constant[node]]] assign[=] dictionary[[<ast.Constant object at 0x7da1b19d3850>, <ast.Constant object at 0x7da1b19d0430>], [<ast.Name object at 0x7da1b19d3160>, <ast.Constant object at 0x7da1b19d1bd0>]]
call[name[children].setdefault, parameter[call[name[i]][constant[parent]], list[[]]]]
call[call[name[children]][call[name[i]][constant[parent]]].append, parameter[call[name[i]][constant[node]]]]
variable[insert_tree] assign[=] call[name[plpy].prepare, parameter[constant[
INSERT INTO trees (nodeid, parent_id, documentid,
title, childorder, latest)
VALUES (DEFAULT, $1, $2, $3, $4, $5)
RETURNING nodeid
], tuple[[<ast.Constant object at 0x7da1b19ccf10>, <ast.Constant object at 0x7da1b19ceda0>, <ast.Constant object at 0x7da1b19cffd0>, <ast.Constant object at 0x7da1b19cce50>, <ast.Constant object at 0x7da1b19ced10>]]]]
def function[execute, parameter[fields]]:
variable[results] assign[=] call[name[plpy].execute, parameter[name[insert_tree], name[fields], constant[1]]]
return[call[call[name[results]][constant[0]]][constant[nodeid]]]
variable[root_node] assign[=] call[call[name[children]][constant[None]]][constant[0]]
def function[build_tree, parameter[node, parent]]:
variable[data] assign[=] call[call[name[tree]][name[node]]][constant[data]]
variable[new_node] assign[=] call[name[execute], parameter[list[[<ast.Name object at 0x7da1b19cd210>, <ast.Call object at 0x7da1b1a3d390>, <ast.Subscript object at 0x7da1b1a3c640>, <ast.Subscript object at 0x7da1b1a3dc30>, <ast.Subscript object at 0x7da1b1a3d810>]]]]
for taget[name[i]] in starred[call[name[children].get, parameter[name[node], list[[]]]]] begin[:]
call[name[build_tree], parameter[name[i], name[new_node]]]
call[name[build_tree], parameter[name[root_node], constant[None]]] | keyword[def] identifier[rebuild_collection_tree] ( identifier[old_collection_ident] , identifier[new_document_id_map] , identifier[plpy] ):
literal[string]
identifier[get_tree] = identifier[plpy] . identifier[prepare] ( literal[string] ,( literal[string] ,))
keyword[def] identifier[get_old_tree] ():
keyword[return] identifier[plpy] . identifier[execute] ( identifier[get_tree] ,( identifier[old_collection_ident] ,))
identifier[tree] ={}
identifier[children] ={}
keyword[for] identifier[i] keyword[in] identifier[get_old_tree] ():
identifier[tree] [ identifier[i] [ literal[string] ]]={ literal[string] : identifier[i] , literal[string] : keyword[None] }
identifier[children] . identifier[setdefault] ( identifier[i] [ literal[string] ],[])
identifier[children] [ identifier[i] [ literal[string] ]]. identifier[append] ( identifier[i] [ literal[string] ])
identifier[insert_tree] = identifier[plpy] . identifier[prepare] ( literal[string] ,( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ))
keyword[def] identifier[execute] ( identifier[fields] ):
identifier[results] = identifier[plpy] . identifier[execute] ( identifier[insert_tree] , identifier[fields] , literal[int] )
keyword[return] identifier[results] [ literal[int] ][ literal[string] ]
identifier[root_node] = identifier[children] [ keyword[None] ][ literal[int] ]
keyword[def] identifier[build_tree] ( identifier[node] , identifier[parent] ):
identifier[data] = identifier[tree] [ identifier[node] ][ literal[string] ]
identifier[new_node] = identifier[execute] ([ identifier[parent] , identifier[new_document_id_map] . identifier[get] ( identifier[data] [ literal[string] ],
identifier[data] [ literal[string] ]), identifier[data] [ literal[string] ],
identifier[data] [ literal[string] ], identifier[data] [ literal[string] ]])
keyword[for] identifier[i] keyword[in] identifier[children] . identifier[get] ( identifier[node] ,[]):
identifier[build_tree] ( identifier[i] , identifier[new_node] )
identifier[build_tree] ( identifier[root_node] , keyword[None] ) | def rebuild_collection_tree(old_collection_ident, new_document_id_map, plpy):
"""Create a new tree for the collection based on the old tree.
This uses new document ids, replacing old ones.
"""
get_tree = plpy.prepare("\n WITH RECURSIVE t(node, parent, document, title, childorder, latest, path)\n AS (SELECT tr.nodeid, tr.parent_id, tr.documentid, tr.title,\n tr.childorder, tr.latest, ARRAY[tr.nodeid]\n FROM trees tr\n WHERE tr.documentid = $1 AND tr.is_collated = 'False'\n UNION ALL\n SELECT c.nodeid, c.parent_id, c.documentid, c.title,\n c.childorder, c.latest, path || ARRAY[c.nodeid]\n FROM trees c JOIN t ON (c.parent_id = t.node)\n WHERE not c.nodeid = ANY(t.path)\n )\n SELECT * FROM t\n ", ('integer',))
def get_old_tree():
return plpy.execute(get_tree, (old_collection_ident,))
tree = {} # { old_nodeid: {'data': ...}, ...}
children = {} # { nodeid: [child_nodeid, ...], child_nodeid: [...]}
for i in get_old_tree():
tree[i['node']] = {'data': i, 'new_nodeid': None}
children.setdefault(i['parent'], [])
children[i['parent']].append(i['node']) # depends on [control=['for'], data=['i']]
insert_tree = plpy.prepare('\n INSERT INTO trees (nodeid, parent_id, documentid,\n title, childorder, latest)\n VALUES (DEFAULT, $1, $2, $3, $4, $5)\n RETURNING nodeid\n ', ('integer', 'integer', 'text', 'integer', 'boolean'))
def execute(fields):
results = plpy.execute(insert_tree, fields, 1)
return results[0]['nodeid']
root_node = children[None][0]
def build_tree(node, parent):
data = tree[node]['data']
new_node = execute([parent, new_document_id_map.get(data['document'], data['document']), data['title'], data['childorder'], data['latest']])
for i in children.get(node, []):
build_tree(i, new_node) # depends on [control=['for'], data=['i']]
build_tree(root_node, None) |
def get_report(self):
""" describe the graph
:returns: report
:rtype: string
"""
ostr = ''
ostr += "Nodes: "+str(len(self.__nodes.keys()))+"\n"
ostr += "Edges: "+str(len(self.__edges.keys()))+"\n"
return ostr | def function[get_report, parameter[self]]:
constant[ describe the graph
:returns: report
:rtype: string
]
variable[ostr] assign[=] constant[]
<ast.AugAssign object at 0x7da1b0a66b90>
<ast.AugAssign object at 0x7da1b0a66e00>
return[name[ostr]] | keyword[def] identifier[get_report] ( identifier[self] ):
literal[string]
identifier[ostr] = literal[string]
identifier[ostr] += literal[string] + identifier[str] ( identifier[len] ( identifier[self] . identifier[__nodes] . identifier[keys] ()))+ literal[string]
identifier[ostr] += literal[string] + identifier[str] ( identifier[len] ( identifier[self] . identifier[__edges] . identifier[keys] ()))+ literal[string]
keyword[return] identifier[ostr] | def get_report(self):
""" describe the graph
:returns: report
:rtype: string
"""
ostr = ''
ostr += 'Nodes: ' + str(len(self.__nodes.keys())) + '\n'
ostr += 'Edges: ' + str(len(self.__edges.keys())) + '\n'
return ostr |
def addEntry(self):
"""Add the `Plot pyBAR data`. entry to `Dataset` menu.
"""
export_icon = QtGui.QIcon()
pixmap = QtGui.QPixmap(os.path.join(PLUGINSDIR,
'csv/icons/document-export.png'))
export_icon.addPixmap(pixmap, QtGui.QIcon.Normal, QtGui.QIcon.On)
self.plot_action = QtGui.QAction(
translate('PlotpyBARdata',
"Plot data with pyBAR plugin",
"Plot data with pyBAR plugin"),
self,
shortcut=QtGui.QKeySequence.UnknownKey, triggered=self.plot,
icon=export_icon,
statusTip=translate('PlotpyBARdata',
"Plotting of selected data with pyBAR",
"Status bar text for the Dataset -> Plot pyBAR data... action"))
# Add the action to the Dataset menu
menu = self.vtgui.dataset_menu
menu.addSeparator()
menu.addAction(self.plot_action)
# Add the action to the leaf context menu
cmenu = self.vtgui.leaf_node_cm
cmenu.addSeparator()
cmenu.addAction(self.plot_action) | def function[addEntry, parameter[self]]:
constant[Add the `Plot pyBAR data`. entry to `Dataset` menu.
]
variable[export_icon] assign[=] call[name[QtGui].QIcon, parameter[]]
variable[pixmap] assign[=] call[name[QtGui].QPixmap, parameter[call[name[os].path.join, parameter[name[PLUGINSDIR], constant[csv/icons/document-export.png]]]]]
call[name[export_icon].addPixmap, parameter[name[pixmap], name[QtGui].QIcon.Normal, name[QtGui].QIcon.On]]
name[self].plot_action assign[=] call[name[QtGui].QAction, parameter[call[name[translate], parameter[constant[PlotpyBARdata], constant[Plot data with pyBAR plugin], constant[Plot data with pyBAR plugin]]], name[self]]]
variable[menu] assign[=] name[self].vtgui.dataset_menu
call[name[menu].addSeparator, parameter[]]
call[name[menu].addAction, parameter[name[self].plot_action]]
variable[cmenu] assign[=] name[self].vtgui.leaf_node_cm
call[name[cmenu].addSeparator, parameter[]]
call[name[cmenu].addAction, parameter[name[self].plot_action]] | keyword[def] identifier[addEntry] ( identifier[self] ):
literal[string]
identifier[export_icon] = identifier[QtGui] . identifier[QIcon] ()
identifier[pixmap] = identifier[QtGui] . identifier[QPixmap] ( identifier[os] . identifier[path] . identifier[join] ( identifier[PLUGINSDIR] ,
literal[string] ))
identifier[export_icon] . identifier[addPixmap] ( identifier[pixmap] , identifier[QtGui] . identifier[QIcon] . identifier[Normal] , identifier[QtGui] . identifier[QIcon] . identifier[On] )
identifier[self] . identifier[plot_action] = identifier[QtGui] . identifier[QAction] (
identifier[translate] ( literal[string] ,
literal[string] ,
literal[string] ),
identifier[self] ,
identifier[shortcut] = identifier[QtGui] . identifier[QKeySequence] . identifier[UnknownKey] , identifier[triggered] = identifier[self] . identifier[plot] ,
identifier[icon] = identifier[export_icon] ,
identifier[statusTip] = identifier[translate] ( literal[string] ,
literal[string] ,
literal[string] ))
identifier[menu] = identifier[self] . identifier[vtgui] . identifier[dataset_menu]
identifier[menu] . identifier[addSeparator] ()
identifier[menu] . identifier[addAction] ( identifier[self] . identifier[plot_action] )
identifier[cmenu] = identifier[self] . identifier[vtgui] . identifier[leaf_node_cm]
identifier[cmenu] . identifier[addSeparator] ()
identifier[cmenu] . identifier[addAction] ( identifier[self] . identifier[plot_action] ) | def addEntry(self):
"""Add the `Plot pyBAR data`. entry to `Dataset` menu.
"""
export_icon = QtGui.QIcon()
pixmap = QtGui.QPixmap(os.path.join(PLUGINSDIR, 'csv/icons/document-export.png'))
export_icon.addPixmap(pixmap, QtGui.QIcon.Normal, QtGui.QIcon.On)
self.plot_action = QtGui.QAction(translate('PlotpyBARdata', 'Plot data with pyBAR plugin', 'Plot data with pyBAR plugin'), self, shortcut=QtGui.QKeySequence.UnknownKey, triggered=self.plot, icon=export_icon, statusTip=translate('PlotpyBARdata', 'Plotting of selected data with pyBAR', 'Status bar text for the Dataset -> Plot pyBAR data... action'))
# Add the action to the Dataset menu
menu = self.vtgui.dataset_menu
menu.addSeparator()
menu.addAction(self.plot_action)
# Add the action to the leaf context menu
cmenu = self.vtgui.leaf_node_cm
cmenu.addSeparator()
cmenu.addAction(self.plot_action) |
def ask_confirmation():
"""Ask for confirmation to the user. Return true if the user confirmed
the execution, false otherwise.
:returns: bool
"""
while True:
print("Do you want to restart these brokers? ", end="")
choice = input().lower()
if choice in ['yes', 'y']:
return True
elif choice in ['no', 'n']:
return False
else:
print("Please respond with 'yes' or 'no'") | def function[ask_confirmation, parameter[]]:
constant[Ask for confirmation to the user. Return true if the user confirmed
the execution, false otherwise.
:returns: bool
]
while constant[True] begin[:]
call[name[print], parameter[constant[Do you want to restart these brokers? ]]]
variable[choice] assign[=] call[call[name[input], parameter[]].lower, parameter[]]
if compare[name[choice] in list[[<ast.Constant object at 0x7da1b077a0e0>, <ast.Constant object at 0x7da1b077a4a0>]]] begin[:]
return[constant[True]] | keyword[def] identifier[ask_confirmation] ():
literal[string]
keyword[while] keyword[True] :
identifier[print] ( literal[string] , identifier[end] = literal[string] )
identifier[choice] = identifier[input] (). identifier[lower] ()
keyword[if] identifier[choice] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] keyword[True]
keyword[elif] identifier[choice] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] keyword[False]
keyword[else] :
identifier[print] ( literal[string] ) | def ask_confirmation():
"""Ask for confirmation to the user. Return true if the user confirmed
the execution, false otherwise.
:returns: bool
"""
while True:
print('Do you want to restart these brokers? ', end='')
choice = input().lower()
if choice in ['yes', 'y']:
return True # depends on [control=['if'], data=[]]
elif choice in ['no', 'n']:
return False # depends on [control=['if'], data=[]]
else:
print("Please respond with 'yes' or 'no'") # depends on [control=['while'], data=[]] |
def _create(self):
""" Create new object on IxNetwork.
:return: IXN object reference.
"""
if 'name' in self._data:
obj_ref = self.api.add(self.obj_parent(), self.obj_type(), name=self.obj_name())
else:
obj_ref = self.api.add(self.obj_parent(), self.obj_type())
self.api.commit()
return self.api.remapIds(obj_ref) | def function[_create, parameter[self]]:
constant[ Create new object on IxNetwork.
:return: IXN object reference.
]
if compare[constant[name] in name[self]._data] begin[:]
variable[obj_ref] assign[=] call[name[self].api.add, parameter[call[name[self].obj_parent, parameter[]], call[name[self].obj_type, parameter[]]]]
call[name[self].api.commit, parameter[]]
return[call[name[self].api.remapIds, parameter[name[obj_ref]]]] | keyword[def] identifier[_create] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[_data] :
identifier[obj_ref] = identifier[self] . identifier[api] . identifier[add] ( identifier[self] . identifier[obj_parent] (), identifier[self] . identifier[obj_type] (), identifier[name] = identifier[self] . identifier[obj_name] ())
keyword[else] :
identifier[obj_ref] = identifier[self] . identifier[api] . identifier[add] ( identifier[self] . identifier[obj_parent] (), identifier[self] . identifier[obj_type] ())
identifier[self] . identifier[api] . identifier[commit] ()
keyword[return] identifier[self] . identifier[api] . identifier[remapIds] ( identifier[obj_ref] ) | def _create(self):
""" Create new object on IxNetwork.
:return: IXN object reference.
"""
if 'name' in self._data:
obj_ref = self.api.add(self.obj_parent(), self.obj_type(), name=self.obj_name()) # depends on [control=['if'], data=[]]
else:
obj_ref = self.api.add(self.obj_parent(), self.obj_type())
self.api.commit()
return self.api.remapIds(obj_ref) |
def getPlugins():
"""
Returns the list of valid ue4cli plugins
"""
# Retrieve the list of detected entry points in the ue4cli.plugins group
plugins = {
entry_point.name: entry_point.load()
for entry_point
in pkg_resources.iter_entry_points('ue4cli.plugins')
}
# Filter out any invalid plugins
plugins = {
name: plugins[name]
for name in plugins
if
'action' in plugins[name] and
'description' in plugins[name] and
'args' in plugins[name] and
callable(plugins[name]['action']) == True and
len(signature(plugins[name]['action']).parameters) == 2
}
return plugins | def function[getPlugins, parameter[]]:
constant[
Returns the list of valid ue4cli plugins
]
variable[plugins] assign[=] <ast.DictComp object at 0x7da18f00e080>
variable[plugins] assign[=] <ast.DictComp object at 0x7da18f00e980>
return[name[plugins]] | keyword[def] identifier[getPlugins] ():
literal[string]
identifier[plugins] ={
identifier[entry_point] . identifier[name] : identifier[entry_point] . identifier[load] ()
keyword[for] identifier[entry_point]
keyword[in] identifier[pkg_resources] . identifier[iter_entry_points] ( literal[string] )
}
identifier[plugins] ={
identifier[name] : identifier[plugins] [ identifier[name] ]
keyword[for] identifier[name] keyword[in] identifier[plugins]
keyword[if]
literal[string] keyword[in] identifier[plugins] [ identifier[name] ] keyword[and]
literal[string] keyword[in] identifier[plugins] [ identifier[name] ] keyword[and]
literal[string] keyword[in] identifier[plugins] [ identifier[name] ] keyword[and]
identifier[callable] ( identifier[plugins] [ identifier[name] ][ literal[string] ])== keyword[True] keyword[and]
identifier[len] ( identifier[signature] ( identifier[plugins] [ identifier[name] ][ literal[string] ]). identifier[parameters] )== literal[int]
}
keyword[return] identifier[plugins] | def getPlugins():
"""
Returns the list of valid ue4cli plugins
""" # Retrieve the list of detected entry points in the ue4cli.plugins group
plugins = {entry_point.name: entry_point.load() for entry_point in pkg_resources.iter_entry_points('ue4cli.plugins')} # Filter out any invalid plugins
plugins = {name: plugins[name] for name in plugins if 'action' in plugins[name] and 'description' in plugins[name] and ('args' in plugins[name]) and (callable(plugins[name]['action']) == True) and (len(signature(plugins[name]['action']).parameters) == 2)}
return plugins |
def _make_cmd_list(cmd_list):
"""
Helper function to easily create the proper json formated string from a list of strs
:param cmd_list: list of strings
:return: str json formatted
"""
cmd = ''
for i in cmd_list:
cmd = cmd + '"' + i + '",'
cmd = cmd[:-1]
return cmd | def function[_make_cmd_list, parameter[cmd_list]]:
constant[
Helper function to easily create the proper json formated string from a list of strs
:param cmd_list: list of strings
:return: str json formatted
]
variable[cmd] assign[=] constant[]
for taget[name[i]] in starred[name[cmd_list]] begin[:]
variable[cmd] assign[=] binary_operation[binary_operation[binary_operation[name[cmd] + constant["]] + name[i]] + constant[",]]
variable[cmd] assign[=] call[name[cmd]][<ast.Slice object at 0x7da20c76e3e0>]
return[name[cmd]] | keyword[def] identifier[_make_cmd_list] ( identifier[cmd_list] ):
literal[string]
identifier[cmd] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[cmd_list] :
identifier[cmd] = identifier[cmd] + literal[string] + identifier[i] + literal[string]
identifier[cmd] = identifier[cmd] [:- literal[int] ]
keyword[return] identifier[cmd] | def _make_cmd_list(cmd_list):
"""
Helper function to easily create the proper json formated string from a list of strs
:param cmd_list: list of strings
:return: str json formatted
"""
cmd = ''
for i in cmd_list:
cmd = cmd + '"' + i + '",' # depends on [control=['for'], data=['i']]
cmd = cmd[:-1]
return cmd |
def get_location(self,callb=None):
"""Convenience method to request the location from the device
This method will check whether the value has already been retrieved from the device,
if so, it will simply return it. If no, it will request the information from the device
and request that callb be executed when a response is received. The default callback
will simply cache the value.
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:returns: The cached value
:rtype: str
"""
if self.location is None:
mypartial=partial(self.resp_set_location)
if callb:
mycallb=lambda x,y:(mypartial(y),callb(x,y))
else:
mycallb=lambda x,y:mypartial(y)
response = self.req_with_resp(GetLocation, StateLocation,callb=mycallb )
return self.location | def function[get_location, parameter[self, callb]]:
constant[Convenience method to request the location from the device
This method will check whether the value has already been retrieved from the device,
if so, it will simply return it. If no, it will request the information from the device
and request that callb be executed when a response is received. The default callback
will simply cache the value.
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:returns: The cached value
:rtype: str
]
if compare[name[self].location is constant[None]] begin[:]
variable[mypartial] assign[=] call[name[partial], parameter[name[self].resp_set_location]]
if name[callb] begin[:]
variable[mycallb] assign[=] <ast.Lambda object at 0x7da204565780>
variable[response] assign[=] call[name[self].req_with_resp, parameter[name[GetLocation], name[StateLocation]]]
return[name[self].location] | keyword[def] identifier[get_location] ( identifier[self] , identifier[callb] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[location] keyword[is] keyword[None] :
identifier[mypartial] = identifier[partial] ( identifier[self] . identifier[resp_set_location] )
keyword[if] identifier[callb] :
identifier[mycallb] = keyword[lambda] identifier[x] , identifier[y] :( identifier[mypartial] ( identifier[y] ), identifier[callb] ( identifier[x] , identifier[y] ))
keyword[else] :
identifier[mycallb] = keyword[lambda] identifier[x] , identifier[y] : identifier[mypartial] ( identifier[y] )
identifier[response] = identifier[self] . identifier[req_with_resp] ( identifier[GetLocation] , identifier[StateLocation] , identifier[callb] = identifier[mycallb] )
keyword[return] identifier[self] . identifier[location] | def get_location(self, callb=None):
"""Convenience method to request the location from the device
This method will check whether the value has already been retrieved from the device,
if so, it will simply return it. If no, it will request the information from the device
and request that callb be executed when a response is received. The default callback
will simply cache the value.
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:returns: The cached value
:rtype: str
"""
if self.location is None:
mypartial = partial(self.resp_set_location)
if callb:
mycallb = lambda x, y: (mypartial(y), callb(x, y)) # depends on [control=['if'], data=[]]
else:
mycallb = lambda x, y: mypartial(y)
response = self.req_with_resp(GetLocation, StateLocation, callb=mycallb) # depends on [control=['if'], data=[]]
return self.location |
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing.
"""
# if we have something that is Index-like, then
# use this, e.g. DatetimeIndex
# Things like `Series._get_value` (via .at) pass the EA directly here.
s = getattr(series, '_values', series)
if isinstance(s, (ExtensionArray, Index)) and is_scalar(key):
# GH 20882, 21257
# Unify Index and ExtensionArray treatment
# First try to convert the key to a location
# If that fails, raise a KeyError if an integer
# index, otherwise, see if key is an integer, and
# try that
try:
iloc = self.get_loc(key)
return s[iloc]
except KeyError:
if (len(self) > 0 and
(self.holds_integer() or self.is_boolean())):
raise
elif is_integer(key):
return s[key]
s = com.values_from_object(series)
k = com.values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
try:
return self._engine.get_value(s, k,
tz=getattr(series.dtype, 'tz', None))
except KeyError as e1:
if len(self) > 0 and (self.holds_integer() or self.is_boolean()):
raise
try:
return libindex.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if is_scalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key) | def function[get_value, parameter[self, series, key]]:
constant[
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing.
]
variable[s] assign[=] call[name[getattr], parameter[name[series], constant[_values], name[series]]]
if <ast.BoolOp object at 0x7da20e9565c0> begin[:]
<ast.Try object at 0x7da20e957160>
variable[s] assign[=] call[name[com].values_from_object, parameter[name[series]]]
variable[k] assign[=] call[name[com].values_from_object, parameter[name[key]]]
variable[k] assign[=] call[name[self]._convert_scalar_indexer, parameter[name[k]]]
<ast.Try object at 0x7da18f58de40> | keyword[def] identifier[get_value] ( identifier[self] , identifier[series] , identifier[key] ):
literal[string]
identifier[s] = identifier[getattr] ( identifier[series] , literal[string] , identifier[series] )
keyword[if] identifier[isinstance] ( identifier[s] ,( identifier[ExtensionArray] , identifier[Index] )) keyword[and] identifier[is_scalar] ( identifier[key] ):
keyword[try] :
identifier[iloc] = identifier[self] . identifier[get_loc] ( identifier[key] )
keyword[return] identifier[s] [ identifier[iloc] ]
keyword[except] identifier[KeyError] :
keyword[if] ( identifier[len] ( identifier[self] )> literal[int] keyword[and]
( identifier[self] . identifier[holds_integer] () keyword[or] identifier[self] . identifier[is_boolean] ())):
keyword[raise]
keyword[elif] identifier[is_integer] ( identifier[key] ):
keyword[return] identifier[s] [ identifier[key] ]
identifier[s] = identifier[com] . identifier[values_from_object] ( identifier[series] )
identifier[k] = identifier[com] . identifier[values_from_object] ( identifier[key] )
identifier[k] = identifier[self] . identifier[_convert_scalar_indexer] ( identifier[k] , identifier[kind] = literal[string] )
keyword[try] :
keyword[return] identifier[self] . identifier[_engine] . identifier[get_value] ( identifier[s] , identifier[k] ,
identifier[tz] = identifier[getattr] ( identifier[series] . identifier[dtype] , literal[string] , keyword[None] ))
keyword[except] identifier[KeyError] keyword[as] identifier[e1] :
keyword[if] identifier[len] ( identifier[self] )> literal[int] keyword[and] ( identifier[self] . identifier[holds_integer] () keyword[or] identifier[self] . identifier[is_boolean] ()):
keyword[raise]
keyword[try] :
keyword[return] identifier[libindex] . identifier[get_value_box] ( identifier[s] , identifier[key] )
keyword[except] identifier[IndexError] :
keyword[raise]
keyword[except] identifier[TypeError] :
keyword[if] identifier[is_iterator] ( identifier[key] ):
keyword[raise] identifier[InvalidIndexError] ( identifier[key] )
keyword[else] :
keyword[raise] identifier[e1]
keyword[except] identifier[Exception] :
keyword[raise] identifier[e1]
keyword[except] identifier[TypeError] :
keyword[if] identifier[is_scalar] ( identifier[key] ):
keyword[raise] identifier[IndexError] ( identifier[key] )
keyword[raise] identifier[InvalidIndexError] ( identifier[key] ) | def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing.
"""
# if we have something that is Index-like, then
# use this, e.g. DatetimeIndex
# Things like `Series._get_value` (via .at) pass the EA directly here.
s = getattr(series, '_values', series)
if isinstance(s, (ExtensionArray, Index)) and is_scalar(key):
# GH 20882, 21257
# Unify Index and ExtensionArray treatment
# First try to convert the key to a location
# If that fails, raise a KeyError if an integer
# index, otherwise, see if key is an integer, and
# try that
try:
iloc = self.get_loc(key)
return s[iloc] # depends on [control=['try'], data=[]]
except KeyError:
if len(self) > 0 and (self.holds_integer() or self.is_boolean()):
raise # depends on [control=['if'], data=[]]
elif is_integer(key):
return s[key] # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
s = com.values_from_object(series)
k = com.values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
try:
return self._engine.get_value(s, k, tz=getattr(series.dtype, 'tz', None)) # depends on [control=['try'], data=[]]
except KeyError as e1:
if len(self) > 0 and (self.holds_integer() or self.is_boolean()):
raise # depends on [control=['if'], data=[]]
try:
return libindex.get_value_box(s, key) # depends on [control=['try'], data=[]]
except IndexError:
raise # depends on [control=['except'], data=[]]
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key) # depends on [control=['if'], data=[]]
else:
raise e1 # depends on [control=['except'], data=[]]
except Exception: # pragma: no cover
raise e1 # depends on [control=['except'], data=[]] # depends on [control=['except'], data=['e1']]
except TypeError:
# python 3
if is_scalar(key): # pragma: no cover
raise IndexError(key) # depends on [control=['if'], data=[]]
raise InvalidIndexError(key) # depends on [control=['except'], data=[]] |
def get_interfaces_ip(self):
'''Return IP interface data.'''
def extract_ip_info(parsed_intf_dict):
'''
IPv4:
- Primary IP is in the '<ip>' tag. If no v4 is configured the return value is 'N/A'.
- Secondary IP's are in '<addr>'. If no secondaries, this field is not returned by
the xmltodict.parse() method.
IPv6:
- All addresses are returned in '<addr6>'. If no v6 configured, this is not returned
either by xmltodict.parse().
Example of XML response for an intf with multiple IPv4 and IPv6 addresses:
<response status="success">
<result>
<ifnet>
<entry>
<name>ethernet1/5</name>
<zone/>
<fwd>N/A</fwd>
<vsys>1</vsys>
<dyn-addr/>
<addr6>
<member>fe80::d61d:71ff:fed8:fe14/64</member>
<member>2001::1234/120</member>
</addr6>
<tag>0</tag>
<ip>169.254.0.1/30</ip>
<id>20</id>
<addr>
<member>1.1.1.1/28</member>
</addr>
</entry>
{...}
</ifnet>
<hw>
{...}
</hw>
</result>
</response>
'''
intf = parsed_intf_dict['name']
_ip_info = {intf: {}}
v4_ip = parsed_intf_dict.get('ip')
secondary_v4_ip = parsed_intf_dict.get('addr')
v6_ip = parsed_intf_dict.get('addr6')
if v4_ip != 'N/A':
address, pref = v4_ip.split('/')
_ip_info[intf].setdefault('ipv4', {})[address] = {'prefix_length': int(pref)}
if secondary_v4_ip is not None:
members = secondary_v4_ip['member']
if not isinstance(members, list):
# If only 1 secondary IP is present, xmltodict converts field to a string, else
# it converts it to a list of strings.
members = [members]
for address in members:
address, pref = address.split('/')
_ip_info[intf].setdefault('ipv4', {})[address] = {'prefix_length': int(pref)}
if v6_ip is not None:
members = v6_ip['member']
if not isinstance(members, list):
# Same "1 vs many -> string vs list of strings" comment.
members = [members]
for address in members:
address, pref = address.split('/')
_ip_info[intf].setdefault('ipv6', {})[address] = {'prefix_length': int(pref)}
# Reset dictionary if no addresses were found.
if _ip_info == {intf: {}}:
_ip_info = {}
return _ip_info
ip_interfaces = {}
cmd = "<show><interface>all</interface></show>"
self.device.op(cmd=cmd)
interface_info_xml = xmltodict.parse(self.device.xml_root())
interface_info_json = json.dumps(
interface_info_xml['response']['result']['ifnet']['entry']
)
interface_info = json.loads(interface_info_json)
if isinstance(interface_info, dict):
# Same "1 vs many -> dict vs list of dicts" comment.
interface_info = [interface_info]
for interface_dict in interface_info:
ip_info = extract_ip_info(interface_dict)
if ip_info:
ip_interfaces.update(ip_info)
return ip_interfaces | def function[get_interfaces_ip, parameter[self]]:
constant[Return IP interface data.]
def function[extract_ip_info, parameter[parsed_intf_dict]]:
constant[
IPv4:
- Primary IP is in the '<ip>' tag. If no v4 is configured the return value is 'N/A'.
- Secondary IP's are in '<addr>'. If no secondaries, this field is not returned by
the xmltodict.parse() method.
IPv6:
- All addresses are returned in '<addr6>'. If no v6 configured, this is not returned
either by xmltodict.parse().
Example of XML response for an intf with multiple IPv4 and IPv6 addresses:
<response status="success">
<result>
<ifnet>
<entry>
<name>ethernet1/5</name>
<zone/>
<fwd>N/A</fwd>
<vsys>1</vsys>
<dyn-addr/>
<addr6>
<member>fe80::d61d:71ff:fed8:fe14/64</member>
<member>2001::1234/120</member>
</addr6>
<tag>0</tag>
<ip>169.254.0.1/30</ip>
<id>20</id>
<addr>
<member>1.1.1.1/28</member>
</addr>
</entry>
{...}
</ifnet>
<hw>
{...}
</hw>
</result>
</response>
]
variable[intf] assign[=] call[name[parsed_intf_dict]][constant[name]]
variable[_ip_info] assign[=] dictionary[[<ast.Name object at 0x7da1b0e0db70>], [<ast.Dict object at 0x7da1b0e0dd50>]]
variable[v4_ip] assign[=] call[name[parsed_intf_dict].get, parameter[constant[ip]]]
variable[secondary_v4_ip] assign[=] call[name[parsed_intf_dict].get, parameter[constant[addr]]]
variable[v6_ip] assign[=] call[name[parsed_intf_dict].get, parameter[constant[addr6]]]
if compare[name[v4_ip] not_equal[!=] constant[N/A]] begin[:]
<ast.Tuple object at 0x7da1b0e0f430> assign[=] call[name[v4_ip].split, parameter[constant[/]]]
call[call[call[name[_ip_info]][name[intf]].setdefault, parameter[constant[ipv4], dictionary[[], []]]]][name[address]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e0c4f0>], [<ast.Call object at 0x7da1b0e0d900>]]
if compare[name[secondary_v4_ip] is_not constant[None]] begin[:]
variable[members] assign[=] call[name[secondary_v4_ip]][constant[member]]
if <ast.UnaryOp object at 0x7da1b0e0f4f0> begin[:]
variable[members] assign[=] list[[<ast.Name object at 0x7da1b0e0fe50>]]
for taget[name[address]] in starred[name[members]] begin[:]
<ast.Tuple object at 0x7da1b0e0f3d0> assign[=] call[name[address].split, parameter[constant[/]]]
call[call[call[name[_ip_info]][name[intf]].setdefault, parameter[constant[ipv4], dictionary[[], []]]]][name[address]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e0e4d0>], [<ast.Call object at 0x7da1b0e0faf0>]]
if compare[name[v6_ip] is_not constant[None]] begin[:]
variable[members] assign[=] call[name[v6_ip]][constant[member]]
if <ast.UnaryOp object at 0x7da1b0e0fac0> begin[:]
variable[members] assign[=] list[[<ast.Name object at 0x7da1b0e0c190>]]
for taget[name[address]] in starred[name[members]] begin[:]
<ast.Tuple object at 0x7da1b0e0fa60> assign[=] call[name[address].split, parameter[constant[/]]]
call[call[call[name[_ip_info]][name[intf]].setdefault, parameter[constant[ipv6], dictionary[[], []]]]][name[address]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e0c820>], [<ast.Call object at 0x7da1b0e0f790>]]
if compare[name[_ip_info] equal[==] dictionary[[<ast.Name object at 0x7da1b0e0d930>], [<ast.Dict object at 0x7da1b0e0dde0>]]] begin[:]
variable[_ip_info] assign[=] dictionary[[], []]
return[name[_ip_info]]
variable[ip_interfaces] assign[=] dictionary[[], []]
variable[cmd] assign[=] constant[<show><interface>all</interface></show>]
call[name[self].device.op, parameter[]]
variable[interface_info_xml] assign[=] call[name[xmltodict].parse, parameter[call[name[self].device.xml_root, parameter[]]]]
variable[interface_info_json] assign[=] call[name[json].dumps, parameter[call[call[call[call[name[interface_info_xml]][constant[response]]][constant[result]]][constant[ifnet]]][constant[entry]]]]
variable[interface_info] assign[=] call[name[json].loads, parameter[name[interface_info_json]]]
if call[name[isinstance], parameter[name[interface_info], name[dict]]] begin[:]
variable[interface_info] assign[=] list[[<ast.Name object at 0x7da1b26acaf0>]]
for taget[name[interface_dict]] in starred[name[interface_info]] begin[:]
variable[ip_info] assign[=] call[name[extract_ip_info], parameter[name[interface_dict]]]
if name[ip_info] begin[:]
call[name[ip_interfaces].update, parameter[name[ip_info]]]
return[name[ip_interfaces]] | keyword[def] identifier[get_interfaces_ip] ( identifier[self] ):
literal[string]
keyword[def] identifier[extract_ip_info] ( identifier[parsed_intf_dict] ):
literal[string]
identifier[intf] = identifier[parsed_intf_dict] [ literal[string] ]
identifier[_ip_info] ={ identifier[intf] :{}}
identifier[v4_ip] = identifier[parsed_intf_dict] . identifier[get] ( literal[string] )
identifier[secondary_v4_ip] = identifier[parsed_intf_dict] . identifier[get] ( literal[string] )
identifier[v6_ip] = identifier[parsed_intf_dict] . identifier[get] ( literal[string] )
keyword[if] identifier[v4_ip] != literal[string] :
identifier[address] , identifier[pref] = identifier[v4_ip] . identifier[split] ( literal[string] )
identifier[_ip_info] [ identifier[intf] ]. identifier[setdefault] ( literal[string] ,{})[ identifier[address] ]={ literal[string] : identifier[int] ( identifier[pref] )}
keyword[if] identifier[secondary_v4_ip] keyword[is] keyword[not] keyword[None] :
identifier[members] = identifier[secondary_v4_ip] [ literal[string] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[members] , identifier[list] ):
identifier[members] =[ identifier[members] ]
keyword[for] identifier[address] keyword[in] identifier[members] :
identifier[address] , identifier[pref] = identifier[address] . identifier[split] ( literal[string] )
identifier[_ip_info] [ identifier[intf] ]. identifier[setdefault] ( literal[string] ,{})[ identifier[address] ]={ literal[string] : identifier[int] ( identifier[pref] )}
keyword[if] identifier[v6_ip] keyword[is] keyword[not] keyword[None] :
identifier[members] = identifier[v6_ip] [ literal[string] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[members] , identifier[list] ):
identifier[members] =[ identifier[members] ]
keyword[for] identifier[address] keyword[in] identifier[members] :
identifier[address] , identifier[pref] = identifier[address] . identifier[split] ( literal[string] )
identifier[_ip_info] [ identifier[intf] ]. identifier[setdefault] ( literal[string] ,{})[ identifier[address] ]={ literal[string] : identifier[int] ( identifier[pref] )}
keyword[if] identifier[_ip_info] =={ identifier[intf] :{}}:
identifier[_ip_info] ={}
keyword[return] identifier[_ip_info]
identifier[ip_interfaces] ={}
identifier[cmd] = literal[string]
identifier[self] . identifier[device] . identifier[op] ( identifier[cmd] = identifier[cmd] )
identifier[interface_info_xml] = identifier[xmltodict] . identifier[parse] ( identifier[self] . identifier[device] . identifier[xml_root] ())
identifier[interface_info_json] = identifier[json] . identifier[dumps] (
identifier[interface_info_xml] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]
)
identifier[interface_info] = identifier[json] . identifier[loads] ( identifier[interface_info_json] )
keyword[if] identifier[isinstance] ( identifier[interface_info] , identifier[dict] ):
identifier[interface_info] =[ identifier[interface_info] ]
keyword[for] identifier[interface_dict] keyword[in] identifier[interface_info] :
identifier[ip_info] = identifier[extract_ip_info] ( identifier[interface_dict] )
keyword[if] identifier[ip_info] :
identifier[ip_interfaces] . identifier[update] ( identifier[ip_info] )
keyword[return] identifier[ip_interfaces] | def get_interfaces_ip(self):
"""Return IP interface data."""
def extract_ip_info(parsed_intf_dict):
"""
IPv4:
- Primary IP is in the '<ip>' tag. If no v4 is configured the return value is 'N/A'.
- Secondary IP's are in '<addr>'. If no secondaries, this field is not returned by
the xmltodict.parse() method.
IPv6:
- All addresses are returned in '<addr6>'. If no v6 configured, this is not returned
either by xmltodict.parse().
Example of XML response for an intf with multiple IPv4 and IPv6 addresses:
<response status="success">
<result>
<ifnet>
<entry>
<name>ethernet1/5</name>
<zone/>
<fwd>N/A</fwd>
<vsys>1</vsys>
<dyn-addr/>
<addr6>
<member>fe80::d61d:71ff:fed8:fe14/64</member>
<member>2001::1234/120</member>
</addr6>
<tag>0</tag>
<ip>169.254.0.1/30</ip>
<id>20</id>
<addr>
<member>1.1.1.1/28</member>
</addr>
</entry>
{...}
</ifnet>
<hw>
{...}
</hw>
</result>
</response>
"""
intf = parsed_intf_dict['name']
_ip_info = {intf: {}}
v4_ip = parsed_intf_dict.get('ip')
secondary_v4_ip = parsed_intf_dict.get('addr')
v6_ip = parsed_intf_dict.get('addr6')
if v4_ip != 'N/A':
(address, pref) = v4_ip.split('/')
_ip_info[intf].setdefault('ipv4', {})[address] = {'prefix_length': int(pref)} # depends on [control=['if'], data=['v4_ip']]
if secondary_v4_ip is not None:
members = secondary_v4_ip['member']
if not isinstance(members, list):
# If only 1 secondary IP is present, xmltodict converts field to a string, else
# it converts it to a list of strings.
members = [members] # depends on [control=['if'], data=[]]
for address in members:
(address, pref) = address.split('/')
_ip_info[intf].setdefault('ipv4', {})[address] = {'prefix_length': int(pref)} # depends on [control=['for'], data=['address']] # depends on [control=['if'], data=['secondary_v4_ip']]
if v6_ip is not None:
members = v6_ip['member']
if not isinstance(members, list):
# Same "1 vs many -> string vs list of strings" comment.
members = [members] # depends on [control=['if'], data=[]]
for address in members:
(address, pref) = address.split('/')
_ip_info[intf].setdefault('ipv6', {})[address] = {'prefix_length': int(pref)} # depends on [control=['for'], data=['address']] # depends on [control=['if'], data=['v6_ip']]
# Reset dictionary if no addresses were found.
if _ip_info == {intf: {}}:
_ip_info = {} # depends on [control=['if'], data=['_ip_info']]
return _ip_info
ip_interfaces = {}
cmd = '<show><interface>all</interface></show>'
self.device.op(cmd=cmd)
interface_info_xml = xmltodict.parse(self.device.xml_root())
interface_info_json = json.dumps(interface_info_xml['response']['result']['ifnet']['entry'])
interface_info = json.loads(interface_info_json)
if isinstance(interface_info, dict):
# Same "1 vs many -> dict vs list of dicts" comment.
interface_info = [interface_info] # depends on [control=['if'], data=[]]
for interface_dict in interface_info:
ip_info = extract_ip_info(interface_dict)
if ip_info:
ip_interfaces.update(ip_info) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['interface_dict']]
return ip_interfaces |
def _relative_name(self, record_name):
"""Returns sub-domain of a domain name"""
# Handle None and empty strings as None
if not record_name:
return None
subdomain = super(Provider, self)._relative_name(record_name)
return subdomain if subdomain else None | def function[_relative_name, parameter[self, record_name]]:
constant[Returns sub-domain of a domain name]
if <ast.UnaryOp object at 0x7da1b1d22f20> begin[:]
return[constant[None]]
variable[subdomain] assign[=] call[call[name[super], parameter[name[Provider], name[self]]]._relative_name, parameter[name[record_name]]]
return[<ast.IfExp object at 0x7da1b1d23160>] | keyword[def] identifier[_relative_name] ( identifier[self] , identifier[record_name] ):
literal[string]
keyword[if] keyword[not] identifier[record_name] :
keyword[return] keyword[None]
identifier[subdomain] = identifier[super] ( identifier[Provider] , identifier[self] ). identifier[_relative_name] ( identifier[record_name] )
keyword[return] identifier[subdomain] keyword[if] identifier[subdomain] keyword[else] keyword[None] | def _relative_name(self, record_name):
"""Returns sub-domain of a domain name"""
# Handle None and empty strings as None
if not record_name:
return None # depends on [control=['if'], data=[]]
subdomain = super(Provider, self)._relative_name(record_name)
return subdomain if subdomain else None |
def dumps(obj):
"""Outputs json with formatting edits + object handling."""
return json.dumps(obj, indent=4, sort_keys=True, cls=CustomEncoder) | def function[dumps, parameter[obj]]:
constant[Outputs json with formatting edits + object handling.]
return[call[name[json].dumps, parameter[name[obj]]]] | keyword[def] identifier[dumps] ( identifier[obj] ):
literal[string]
keyword[return] identifier[json] . identifier[dumps] ( identifier[obj] , identifier[indent] = literal[int] , identifier[sort_keys] = keyword[True] , identifier[cls] = identifier[CustomEncoder] ) | def dumps(obj):
"""Outputs json with formatting edits + object handling."""
return json.dumps(obj, indent=4, sort_keys=True, cls=CustomEncoder) |
def index(self, val, start=None, stop=None):
"""
Return the smallest *k* such that L[k] == val and i <= k < j`. Raises
ValueError if *val* is not present. *stop* defaults to the end of the
list. *start* defaults to the beginning. Negative indices are supported,
as for slice indices.
"""
_len = self._len
if not _len:
raise ValueError('{0!r} is not in list'.format(val))
if start is None:
start = 0
if start < 0:
start += _len
if start < 0:
start = 0
if stop is None:
stop = _len
if stop < 0:
stop += _len
if stop > _len:
stop = _len
if stop <= start:
raise ValueError('{0!r} is not in list'.format(val))
_maxes = self._maxes
key = self._key(val)
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
raise ValueError('{0!r} is not in list'.format(val))
stop -= 1
_lists = self._lists
_keys = self._keys
idx = bisect_left(_keys[pos], key)
len_keys = len(_keys)
len_sublist = len(_keys[pos])
while True:
if _keys[pos][idx] != key:
raise ValueError('{0!r} is not in list'.format(val))
if _lists[pos][idx] == val:
loc = self._loc(pos, idx)
if start <= loc <= stop:
return loc
elif loc > stop:
break
idx += 1
if idx == len_sublist:
pos += 1
if pos == len_keys:
raise ValueError('{0!r} is not in list'.format(val))
len_sublist = len(_keys[pos])
idx = 0
raise ValueError('{0!r} is not in list'.format(val)) | def function[index, parameter[self, val, start, stop]]:
constant[
Return the smallest *k* such that L[k] == val and i <= k < j`. Raises
ValueError if *val* is not present. *stop* defaults to the end of the
list. *start* defaults to the beginning. Negative indices are supported,
as for slice indices.
]
variable[_len] assign[=] name[self]._len
if <ast.UnaryOp object at 0x7da1b17ce7a0> begin[:]
<ast.Raise object at 0x7da1b17cfe50>
if compare[name[start] is constant[None]] begin[:]
variable[start] assign[=] constant[0]
if compare[name[start] less[<] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b17cefe0>
if compare[name[start] less[<] constant[0]] begin[:]
variable[start] assign[=] constant[0]
if compare[name[stop] is constant[None]] begin[:]
variable[stop] assign[=] name[_len]
if compare[name[stop] less[<] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b17cddb0>
if compare[name[stop] greater[>] name[_len]] begin[:]
variable[stop] assign[=] name[_len]
if compare[name[stop] less_or_equal[<=] name[start]] begin[:]
<ast.Raise object at 0x7da1b17cdba0>
variable[_maxes] assign[=] name[self]._maxes
variable[key] assign[=] call[name[self]._key, parameter[name[val]]]
variable[pos] assign[=] call[name[bisect_left], parameter[name[_maxes], name[key]]]
if compare[name[pos] equal[==] call[name[len], parameter[name[_maxes]]]] begin[:]
<ast.Raise object at 0x7da18eb57e80>
<ast.AugAssign object at 0x7da20e9b35e0>
variable[_lists] assign[=] name[self]._lists
variable[_keys] assign[=] name[self]._keys
variable[idx] assign[=] call[name[bisect_left], parameter[call[name[_keys]][name[pos]], name[key]]]
variable[len_keys] assign[=] call[name[len], parameter[name[_keys]]]
variable[len_sublist] assign[=] call[name[len], parameter[call[name[_keys]][name[pos]]]]
while constant[True] begin[:]
if compare[call[call[name[_keys]][name[pos]]][name[idx]] not_equal[!=] name[key]] begin[:]
<ast.Raise object at 0x7da1b17cd150>
if compare[call[call[name[_lists]][name[pos]]][name[idx]] equal[==] name[val]] begin[:]
variable[loc] assign[=] call[name[self]._loc, parameter[name[pos], name[idx]]]
if compare[name[start] less_or_equal[<=] name[loc]] begin[:]
return[name[loc]]
<ast.AugAssign object at 0x7da1b17cce80>
if compare[name[idx] equal[==] name[len_sublist]] begin[:]
<ast.AugAssign object at 0x7da1b17ccc40>
if compare[name[pos] equal[==] name[len_keys]] begin[:]
<ast.Raise object at 0x7da1b17cc9a0>
variable[len_sublist] assign[=] call[name[len], parameter[call[name[_keys]][name[pos]]]]
variable[idx] assign[=] constant[0]
<ast.Raise object at 0x7da1b17cc340> | keyword[def] identifier[index] ( identifier[self] , identifier[val] , identifier[start] = keyword[None] , identifier[stop] = keyword[None] ):
literal[string]
identifier[_len] = identifier[self] . identifier[_len]
keyword[if] keyword[not] identifier[_len] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[val] ))
keyword[if] identifier[start] keyword[is] keyword[None] :
identifier[start] = literal[int]
keyword[if] identifier[start] < literal[int] :
identifier[start] += identifier[_len]
keyword[if] identifier[start] < literal[int] :
identifier[start] = literal[int]
keyword[if] identifier[stop] keyword[is] keyword[None] :
identifier[stop] = identifier[_len]
keyword[if] identifier[stop] < literal[int] :
identifier[stop] += identifier[_len]
keyword[if] identifier[stop] > identifier[_len] :
identifier[stop] = identifier[_len]
keyword[if] identifier[stop] <= identifier[start] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[val] ))
identifier[_maxes] = identifier[self] . identifier[_maxes]
identifier[key] = identifier[self] . identifier[_key] ( identifier[val] )
identifier[pos] = identifier[bisect_left] ( identifier[_maxes] , identifier[key] )
keyword[if] identifier[pos] == identifier[len] ( identifier[_maxes] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[val] ))
identifier[stop] -= literal[int]
identifier[_lists] = identifier[self] . identifier[_lists]
identifier[_keys] = identifier[self] . identifier[_keys]
identifier[idx] = identifier[bisect_left] ( identifier[_keys] [ identifier[pos] ], identifier[key] )
identifier[len_keys] = identifier[len] ( identifier[_keys] )
identifier[len_sublist] = identifier[len] ( identifier[_keys] [ identifier[pos] ])
keyword[while] keyword[True] :
keyword[if] identifier[_keys] [ identifier[pos] ][ identifier[idx] ]!= identifier[key] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[val] ))
keyword[if] identifier[_lists] [ identifier[pos] ][ identifier[idx] ]== identifier[val] :
identifier[loc] = identifier[self] . identifier[_loc] ( identifier[pos] , identifier[idx] )
keyword[if] identifier[start] <= identifier[loc] <= identifier[stop] :
keyword[return] identifier[loc]
keyword[elif] identifier[loc] > identifier[stop] :
keyword[break]
identifier[idx] += literal[int]
keyword[if] identifier[idx] == identifier[len_sublist] :
identifier[pos] += literal[int]
keyword[if] identifier[pos] == identifier[len_keys] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[val] ))
identifier[len_sublist] = identifier[len] ( identifier[_keys] [ identifier[pos] ])
identifier[idx] = literal[int]
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[val] )) | def index(self, val, start=None, stop=None):
"""
Return the smallest *k* such that L[k] == val and i <= k < j`. Raises
ValueError if *val* is not present. *stop* defaults to the end of the
list. *start* defaults to the beginning. Negative indices are supported,
as for slice indices.
"""
_len = self._len
if not _len:
raise ValueError('{0!r} is not in list'.format(val)) # depends on [control=['if'], data=[]]
if start is None:
start = 0 # depends on [control=['if'], data=['start']]
if start < 0:
start += _len # depends on [control=['if'], data=['start']]
if start < 0:
start = 0 # depends on [control=['if'], data=['start']]
if stop is None:
stop = _len # depends on [control=['if'], data=['stop']]
if stop < 0:
stop += _len # depends on [control=['if'], data=['stop']]
if stop > _len:
stop = _len # depends on [control=['if'], data=['stop', '_len']]
if stop <= start:
raise ValueError('{0!r} is not in list'.format(val)) # depends on [control=['if'], data=[]]
_maxes = self._maxes
key = self._key(val)
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
raise ValueError('{0!r} is not in list'.format(val)) # depends on [control=['if'], data=[]]
stop -= 1
_lists = self._lists
_keys = self._keys
idx = bisect_left(_keys[pos], key)
len_keys = len(_keys)
len_sublist = len(_keys[pos])
while True:
if _keys[pos][idx] != key:
raise ValueError('{0!r} is not in list'.format(val)) # depends on [control=['if'], data=[]]
if _lists[pos][idx] == val:
loc = self._loc(pos, idx)
if start <= loc <= stop:
return loc # depends on [control=['if'], data=['loc']]
elif loc > stop:
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
idx += 1
if idx == len_sublist:
pos += 1
if pos == len_keys:
raise ValueError('{0!r} is not in list'.format(val)) # depends on [control=['if'], data=[]]
len_sublist = len(_keys[pos])
idx = 0 # depends on [control=['if'], data=['idx', 'len_sublist']] # depends on [control=['while'], data=[]]
raise ValueError('{0!r} is not in list'.format(val)) |
def _allocate_from_v6_subnet(self, context, net_id, subnet,
port_id, reuse_after, ip_address=None,
**kwargs):
"""This attempts to allocate v6 addresses as per RFC2462 and RFC3041.
To accomodate this, we effectively treat all v6 assignment as a
first time allocation utilizing the MAC address of the VIF. Because
we recycle MACs, we will eventually attempt to recreate a previously
generated v6 address. Instead of failing, we've opted to handle
reallocating that address in this method.
This should provide a performance boost over attempting to check
each and every subnet in the existing reallocate logic, as we'd
have to iterate over each and every subnet returned
"""
LOG.info("Attempting to allocate a v6 address - [{0}]".format(
utils.pretty_kwargs(network_id=net_id, subnet=subnet,
port_id=port_id, ip_address=ip_address)))
if ip_address:
LOG.info("IP %s explicitly requested, deferring to standard "
"allocation" % ip_address)
return self._allocate_from_subnet(context, net_id=net_id,
subnet=subnet, port_id=port_id,
reuse_after=reuse_after,
ip_address=ip_address, **kwargs)
else:
mac = kwargs.get("mac_address")
if mac:
mac = kwargs["mac_address"].get("address")
if subnet and subnet["ip_policy"]:
ip_policy_cidrs = subnet["ip_policy"].get_cidrs_ip_set()
else:
ip_policy_cidrs = netaddr.IPSet([])
for tries, ip_address in enumerate(
generate_v6(mac, port_id, subnet["cidr"])):
LOG.info("Attempt {0} of {1}".format(
tries + 1, CONF.QUARK.v6_allocation_attempts))
if tries > CONF.QUARK.v6_allocation_attempts - 1:
LOG.info("Exceeded v6 allocation attempts, bailing")
raise ip_address_failure(net_id)
ip_address = netaddr.IPAddress(ip_address).ipv6()
LOG.info("Generated a new v6 address {0}".format(
str(ip_address)))
if (ip_policy_cidrs is not None and
ip_address in ip_policy_cidrs):
LOG.info("Address {0} excluded by policy".format(
str(ip_address)))
continue
try:
with context.session.begin():
address = db_api.ip_address_create(
context, address=ip_address,
subnet_id=subnet["id"],
version=subnet["ip_version"], network_id=net_id,
address_type=kwargs.get('address_type',
ip_types.FIXED))
return address
except db_exception.DBDuplicateEntry:
# This shouldn't ever happen, since we hold a unique MAC
# address from the previous IPAM step.
LOG.info("{0} exists but was already "
"allocated".format(str(ip_address)))
LOG.debug("Duplicate entry found when inserting subnet_id"
" %s ip_address %s", subnet["id"], ip_address) | def function[_allocate_from_v6_subnet, parameter[self, context, net_id, subnet, port_id, reuse_after, ip_address]]:
constant[This attempts to allocate v6 addresses as per RFC2462 and RFC3041.
To accomodate this, we effectively treat all v6 assignment as a
first time allocation utilizing the MAC address of the VIF. Because
we recycle MACs, we will eventually attempt to recreate a previously
generated v6 address. Instead of failing, we've opted to handle
reallocating that address in this method.
This should provide a performance boost over attempting to check
each and every subnet in the existing reallocate logic, as we'd
have to iterate over each and every subnet returned
]
call[name[LOG].info, parameter[call[constant[Attempting to allocate a v6 address - [{0}]].format, parameter[call[name[utils].pretty_kwargs, parameter[]]]]]]
if name[ip_address] begin[:]
call[name[LOG].info, parameter[binary_operation[constant[IP %s explicitly requested, deferring to standard allocation] <ast.Mod object at 0x7da2590d6920> name[ip_address]]]]
return[call[name[self]._allocate_from_subnet, parameter[name[context]]]] | keyword[def] identifier[_allocate_from_v6_subnet] ( identifier[self] , identifier[context] , identifier[net_id] , identifier[subnet] ,
identifier[port_id] , identifier[reuse_after] , identifier[ip_address] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[LOG] . identifier[info] ( literal[string] . identifier[format] (
identifier[utils] . identifier[pretty_kwargs] ( identifier[network_id] = identifier[net_id] , identifier[subnet] = identifier[subnet] ,
identifier[port_id] = identifier[port_id] , identifier[ip_address] = identifier[ip_address] )))
keyword[if] identifier[ip_address] :
identifier[LOG] . identifier[info] ( literal[string]
literal[string] % identifier[ip_address] )
keyword[return] identifier[self] . identifier[_allocate_from_subnet] ( identifier[context] , identifier[net_id] = identifier[net_id] ,
identifier[subnet] = identifier[subnet] , identifier[port_id] = identifier[port_id] ,
identifier[reuse_after] = identifier[reuse_after] ,
identifier[ip_address] = identifier[ip_address] ,** identifier[kwargs] )
keyword[else] :
identifier[mac] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[if] identifier[mac] :
identifier[mac] = identifier[kwargs] [ literal[string] ]. identifier[get] ( literal[string] )
keyword[if] identifier[subnet] keyword[and] identifier[subnet] [ literal[string] ]:
identifier[ip_policy_cidrs] = identifier[subnet] [ literal[string] ]. identifier[get_cidrs_ip_set] ()
keyword[else] :
identifier[ip_policy_cidrs] = identifier[netaddr] . identifier[IPSet] ([])
keyword[for] identifier[tries] , identifier[ip_address] keyword[in] identifier[enumerate] (
identifier[generate_v6] ( identifier[mac] , identifier[port_id] , identifier[subnet] [ literal[string] ])):
identifier[LOG] . identifier[info] ( literal[string] . identifier[format] (
identifier[tries] + literal[int] , identifier[CONF] . identifier[QUARK] . identifier[v6_allocation_attempts] ))
keyword[if] identifier[tries] > identifier[CONF] . identifier[QUARK] . identifier[v6_allocation_attempts] - literal[int] :
identifier[LOG] . identifier[info] ( literal[string] )
keyword[raise] identifier[ip_address_failure] ( identifier[net_id] )
identifier[ip_address] = identifier[netaddr] . identifier[IPAddress] ( identifier[ip_address] ). identifier[ipv6] ()
identifier[LOG] . identifier[info] ( literal[string] . identifier[format] (
identifier[str] ( identifier[ip_address] )))
keyword[if] ( identifier[ip_policy_cidrs] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[ip_address] keyword[in] identifier[ip_policy_cidrs] ):
identifier[LOG] . identifier[info] ( literal[string] . identifier[format] (
identifier[str] ( identifier[ip_address] )))
keyword[continue]
keyword[try] :
keyword[with] identifier[context] . identifier[session] . identifier[begin] ():
identifier[address] = identifier[db_api] . identifier[ip_address_create] (
identifier[context] , identifier[address] = identifier[ip_address] ,
identifier[subnet_id] = identifier[subnet] [ literal[string] ],
identifier[version] = identifier[subnet] [ literal[string] ], identifier[network_id] = identifier[net_id] ,
identifier[address_type] = identifier[kwargs] . identifier[get] ( literal[string] ,
identifier[ip_types] . identifier[FIXED] ))
keyword[return] identifier[address]
keyword[except] identifier[db_exception] . identifier[DBDuplicateEntry] :
identifier[LOG] . identifier[info] ( literal[string]
literal[string] . identifier[format] ( identifier[str] ( identifier[ip_address] )))
identifier[LOG] . identifier[debug] ( literal[string]
literal[string] , identifier[subnet] [ literal[string] ], identifier[ip_address] ) | def _allocate_from_v6_subnet(self, context, net_id, subnet, port_id, reuse_after, ip_address=None, **kwargs):
"""This attempts to allocate v6 addresses as per RFC2462 and RFC3041.
To accomodate this, we effectively treat all v6 assignment as a
first time allocation utilizing the MAC address of the VIF. Because
we recycle MACs, we will eventually attempt to recreate a previously
generated v6 address. Instead of failing, we've opted to handle
reallocating that address in this method.
This should provide a performance boost over attempting to check
each and every subnet in the existing reallocate logic, as we'd
have to iterate over each and every subnet returned
"""
LOG.info('Attempting to allocate a v6 address - [{0}]'.format(utils.pretty_kwargs(network_id=net_id, subnet=subnet, port_id=port_id, ip_address=ip_address)))
if ip_address:
LOG.info('IP %s explicitly requested, deferring to standard allocation' % ip_address)
return self._allocate_from_subnet(context, net_id=net_id, subnet=subnet, port_id=port_id, reuse_after=reuse_after, ip_address=ip_address, **kwargs) # depends on [control=['if'], data=[]]
else:
mac = kwargs.get('mac_address')
if mac:
mac = kwargs['mac_address'].get('address') # depends on [control=['if'], data=[]]
if subnet and subnet['ip_policy']:
ip_policy_cidrs = subnet['ip_policy'].get_cidrs_ip_set() # depends on [control=['if'], data=[]]
else:
ip_policy_cidrs = netaddr.IPSet([])
for (tries, ip_address) in enumerate(generate_v6(mac, port_id, subnet['cidr'])):
LOG.info('Attempt {0} of {1}'.format(tries + 1, CONF.QUARK.v6_allocation_attempts))
if tries > CONF.QUARK.v6_allocation_attempts - 1:
LOG.info('Exceeded v6 allocation attempts, bailing')
raise ip_address_failure(net_id) # depends on [control=['if'], data=[]]
ip_address = netaddr.IPAddress(ip_address).ipv6()
LOG.info('Generated a new v6 address {0}'.format(str(ip_address)))
if ip_policy_cidrs is not None and ip_address in ip_policy_cidrs:
LOG.info('Address {0} excluded by policy'.format(str(ip_address)))
continue # depends on [control=['if'], data=[]]
try:
with context.session.begin():
address = db_api.ip_address_create(context, address=ip_address, subnet_id=subnet['id'], version=subnet['ip_version'], network_id=net_id, address_type=kwargs.get('address_type', ip_types.FIXED))
return address # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except db_exception.DBDuplicateEntry:
# This shouldn't ever happen, since we hold a unique MAC
# address from the previous IPAM step.
LOG.info('{0} exists but was already allocated'.format(str(ip_address)))
LOG.debug('Duplicate entry found when inserting subnet_id %s ip_address %s', subnet['id'], ip_address) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] |
def expand_tamil(start,end):
""" expand uyir or mei-letter range etc.
i.e. அ-ஔ gets converted to அ,ஆ,இ,ஈ,உ,ஊ,எ,ஏ,ஐ,ஒ,ஓ,ஔ etc.
"""
# few sequences
for seq in [utf8.uyir_letters, utf8.grantha_mei_letters, \
utf8.grantha_agaram_letters]:
if is_containing_seq(start,end,seq):
return expand_sequence(start,end,seq)
# all Tamil letters
seq = utf8.grantha_uyirmei_letters
if is_containing_seq(start,end,seq):
return expand_sequence(start,end,seq)
raise Exception("Cannot understand sequence [%s-%s]"%(start,end)) | def function[expand_tamil, parameter[start, end]]:
constant[ expand uyir or mei-letter range etc.
i.e. அ-ஔ gets converted to அ,ஆ,இ,ஈ,உ,ஊ,எ,ஏ,ஐ,ஒ,ஓ,ஔ etc.
]
for taget[name[seq]] in starred[list[[<ast.Attribute object at 0x7da1b06982e0>, <ast.Attribute object at 0x7da1b06985e0>, <ast.Attribute object at 0x7da1b0698c70>]]] begin[:]
if call[name[is_containing_seq], parameter[name[start], name[end], name[seq]]] begin[:]
return[call[name[expand_sequence], parameter[name[start], name[end], name[seq]]]]
variable[seq] assign[=] name[utf8].grantha_uyirmei_letters
if call[name[is_containing_seq], parameter[name[start], name[end], name[seq]]] begin[:]
return[call[name[expand_sequence], parameter[name[start], name[end], name[seq]]]]
<ast.Raise object at 0x7da1b069d270> | keyword[def] identifier[expand_tamil] ( identifier[start] , identifier[end] ):
literal[string]
keyword[for] identifier[seq] keyword[in] [ identifier[utf8] . identifier[uyir_letters] , identifier[utf8] . identifier[grantha_mei_letters] , identifier[utf8] . identifier[grantha_agaram_letters] ]:
keyword[if] identifier[is_containing_seq] ( identifier[start] , identifier[end] , identifier[seq] ):
keyword[return] identifier[expand_sequence] ( identifier[start] , identifier[end] , identifier[seq] )
identifier[seq] = identifier[utf8] . identifier[grantha_uyirmei_letters]
keyword[if] identifier[is_containing_seq] ( identifier[start] , identifier[end] , identifier[seq] ):
keyword[return] identifier[expand_sequence] ( identifier[start] , identifier[end] , identifier[seq] )
keyword[raise] identifier[Exception] ( literal[string] %( identifier[start] , identifier[end] )) | def expand_tamil(start, end):
""" expand uyir or mei-letter range etc.
i.e. அ-ஔ gets converted to அ,ஆ,இ,ஈ,உ,ஊ,எ,ஏ,ஐ,ஒ,ஓ,ஔ etc.
"""
# few sequences
for seq in [utf8.uyir_letters, utf8.grantha_mei_letters, utf8.grantha_agaram_letters]:
if is_containing_seq(start, end, seq):
return expand_sequence(start, end, seq) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['seq']]
# all Tamil letters
seq = utf8.grantha_uyirmei_letters
if is_containing_seq(start, end, seq):
return expand_sequence(start, end, seq) # depends on [control=['if'], data=[]]
raise Exception('Cannot understand sequence [%s-%s]' % (start, end)) |
def allPolygons(self):
"""
Return a list of all polygons in this BSP tree.
"""
polygons = self.polygons[:]
if self.front:
polygons.extend(self.front.allPolygons())
if self.back:
polygons.extend(self.back.allPolygons())
return polygons | def function[allPolygons, parameter[self]]:
constant[
Return a list of all polygons in this BSP tree.
]
variable[polygons] assign[=] call[name[self].polygons][<ast.Slice object at 0x7da18c4cd060>]
if name[self].front begin[:]
call[name[polygons].extend, parameter[call[name[self].front.allPolygons, parameter[]]]]
if name[self].back begin[:]
call[name[polygons].extend, parameter[call[name[self].back.allPolygons, parameter[]]]]
return[name[polygons]] | keyword[def] identifier[allPolygons] ( identifier[self] ):
literal[string]
identifier[polygons] = identifier[self] . identifier[polygons] [:]
keyword[if] identifier[self] . identifier[front] :
identifier[polygons] . identifier[extend] ( identifier[self] . identifier[front] . identifier[allPolygons] ())
keyword[if] identifier[self] . identifier[back] :
identifier[polygons] . identifier[extend] ( identifier[self] . identifier[back] . identifier[allPolygons] ())
keyword[return] identifier[polygons] | def allPolygons(self):
"""
Return a list of all polygons in this BSP tree.
"""
polygons = self.polygons[:]
if self.front:
polygons.extend(self.front.allPolygons()) # depends on [control=['if'], data=[]]
if self.back:
polygons.extend(self.back.allPolygons()) # depends on [control=['if'], data=[]]
return polygons |
def _isCompatible(self, other, reporter):
    """
    This is the environment implementation of
    :meth:`BaseGlyph.isCompatible`.
    Subclasses may override this method.

    Compares this glyph (``glyph1``) against ``other`` (``glyph2``) for
    interpolation compatibility and records every finding on ``reporter``:

    * contour count and per-contour structure (fatal),
    * component count, membership and order (fatal / warning),
    * guideline count and membership (warning),
    * anchor count, membership and order (warning).

    Returns nothing; all results are written onto ``reporter``.
    """
    glyph1 = self
    glyph2 = other
    # contour count
    # NOTE(review): the count uses ``self.contours`` while the pairwise walk
    # below uses ``len(glyph1)`` — presumably the glyph's __len__ is its
    # contour count; confirm in the base class.
    if len(self.contours) != len(glyph2.contours):
        reporter.fatal = True
        reporter.contourCountDifference = True
    # contour pairs
    # Only the contours both glyphs share are compared; _checkPairs records
    # per-contour incompatibilities under reporter.contours.
    for i in range(min(len(glyph1), len(glyph2))):
        contour1 = glyph1[i]
        contour2 = glyph2[i]
        self._checkPairs(contour1, contour2, reporter, reporter.contours)
    # component count
    if len(glyph1.components) != len(glyph2.components):
        reporter.fatal = True
        reporter.componentCountDifference = True
    # component check
    # Components are compared by base-glyph name, position by position;
    # zip_longest pads the shorter side with None so length differences
    # surface as mismatched pairs.
    component_diff = []
    selfComponents = [component.baseGlyph for component in glyph1.components]
    otherComponents = [component.baseGlyph for component in glyph2.components]
    for index, (left, right) in enumerate(
        zip_longest(selfComponents, otherComponents)
    ):
        if left != right:
            component_diff.append((index, left, right))
    if component_diff:
        reporter.warning = True
        reporter.componentDifferences = component_diff
        # Same multiset of components but in a different order is only an
        # order problem, not a membership problem.
        if not reporter.componentCountDifference and set(selfComponents) == set(
            otherComponents
        ):
            reporter.componentOrderDifference = True
        # Counter subtraction keeps only positive multiplicities, so each
        # "missing" set lists components (with duplicates) present on one
        # glyph but absent from the other.
        selfComponents_counted_set = collections.Counter(selfComponents)
        otherComponents_counted_set = collections.Counter(otherComponents)
        missing_from_glyph1 = (
            otherComponents_counted_set - selfComponents_counted_set
        )
        if missing_from_glyph1:
            reporter.fatal = True
            reporter.componentsMissingFromGlyph1 = sorted(
                missing_from_glyph1.elements()
            )
        missing_from_glyph2 = (
            selfComponents_counted_set - otherComponents_counted_set
        )
        if missing_from_glyph2:
            reporter.fatal = True
            reporter.componentsMissingFromGlyph2 = sorted(
                missing_from_glyph2.elements()
            )
    # guideline count
    if len(self.guidelines) != len(glyph2.guidelines):
        reporter.warning = True
        reporter.guidelineCountDifference = True
    # guideline check
    # Guidelines are keyed by (name, position) tuples so identically named
    # guidelines at different indices still compare as different.
    selfGuidelines = []
    otherGuidelines = []
    for source, names in ((self, selfGuidelines),
                          (other, otherGuidelines)):
        for i, guideline in enumerate(source.guidelines):
            names.append((guideline.name, i))
    guidelines1 = set(selfGuidelines)
    guidelines2 = set(otherGuidelines)
    if len(guidelines1.difference(guidelines2)) != 0:
        reporter.warning = True
        reporter.guidelinesMissingFromGlyph2 = list(
            guidelines1.difference(guidelines2))
    if len(guidelines2.difference(guidelines1)) != 0:
        reporter.warning = True
        reporter.guidelinesMissingFromGlyph1 = list(
            guidelines2.difference(guidelines1))
    # anchor count
    if len(self.anchors) != len(glyph2.anchors):
        reporter.warning = True
        reporter.anchorCountDifference = True
    # anchor check
    # Mirrors the component check above, but compares anchors by name.
    anchor_diff = []
    selfAnchors = [anchor.name for anchor in glyph1.anchors]
    otherAnchors = [anchor.name for anchor in glyph2.anchors]
    for index, (left, right) in enumerate(zip_longest(selfAnchors, otherAnchors)):
        if left != right:
            anchor_diff.append((index, left, right))
    if anchor_diff:
        reporter.warning = True
        reporter.anchorDifferences = anchor_diff
        if not reporter.anchorCountDifference and set(selfAnchors) == set(
            otherAnchors
        ):
            reporter.anchorOrderDifference = True
        selfAnchors_counted_set = collections.Counter(selfAnchors)
        otherAnchors_counted_set = collections.Counter(otherAnchors)
        # Unlike the component case, missing anchors do not flip
        # reporter.fatal (or warning) here — they are recorded only.
        missing_from_glyph1 = otherAnchors_counted_set - selfAnchors_counted_set
        if missing_from_glyph1:
            reporter.anchorsMissingFromGlyph1 = sorted(
                missing_from_glyph1.elements()
            )
        missing_from_glyph2 = selfAnchors_counted_set - otherAnchors_counted_set
        if missing_from_glyph2:
            reporter.anchorsMissingFromGlyph2 = sorted(
                missing_from_glyph2.elements()
            )
constant[
This is the environment implementation of
:meth:`BaseGlyph.isCompatible`.
Subclasses may override this method.
]
variable[glyph1] assign[=] name[self]
variable[glyph2] assign[=] name[other]
if compare[call[name[len], parameter[name[self].contours]] not_equal[!=] call[name[len], parameter[name[glyph2].contours]]] begin[:]
name[reporter].fatal assign[=] constant[True]
name[reporter].contourCountDifference assign[=] constant[True]
for taget[name[i]] in starred[call[name[range], parameter[call[name[min], parameter[call[name[len], parameter[name[glyph1]]], call[name[len], parameter[name[glyph2]]]]]]]] begin[:]
variable[contour1] assign[=] call[name[glyph1]][name[i]]
variable[contour2] assign[=] call[name[glyph2]][name[i]]
call[name[self]._checkPairs, parameter[name[contour1], name[contour2], name[reporter], name[reporter].contours]]
if compare[call[name[len], parameter[name[glyph1].components]] not_equal[!=] call[name[len], parameter[name[glyph2].components]]] begin[:]
name[reporter].fatal assign[=] constant[True]
name[reporter].componentCountDifference assign[=] constant[True]
variable[component_diff] assign[=] list[[]]
variable[selfComponents] assign[=] <ast.ListComp object at 0x7da20e748580>
variable[otherComponents] assign[=] <ast.ListComp object at 0x7da212db4ca0>
for taget[tuple[[<ast.Name object at 0x7da20c992680>, <ast.Tuple object at 0x7da20c9938e0>]]] in starred[call[name[enumerate], parameter[call[name[zip_longest], parameter[name[selfComponents], name[otherComponents]]]]]] begin[:]
if compare[name[left] not_equal[!=] name[right]] begin[:]
call[name[component_diff].append, parameter[tuple[[<ast.Name object at 0x7da20c991ba0>, <ast.Name object at 0x7da20c9910f0>, <ast.Name object at 0x7da20c992dd0>]]]]
if name[component_diff] begin[:]
name[reporter].warning assign[=] constant[True]
name[reporter].componentDifferences assign[=] name[component_diff]
if <ast.BoolOp object at 0x7da20c990280> begin[:]
name[reporter].componentOrderDifference assign[=] constant[True]
variable[selfComponents_counted_set] assign[=] call[name[collections].Counter, parameter[name[selfComponents]]]
variable[otherComponents_counted_set] assign[=] call[name[collections].Counter, parameter[name[otherComponents]]]
variable[missing_from_glyph1] assign[=] binary_operation[name[otherComponents_counted_set] - name[selfComponents_counted_set]]
if name[missing_from_glyph1] begin[:]
name[reporter].fatal assign[=] constant[True]
name[reporter].componentsMissingFromGlyph1 assign[=] call[name[sorted], parameter[call[name[missing_from_glyph1].elements, parameter[]]]]
variable[missing_from_glyph2] assign[=] binary_operation[name[selfComponents_counted_set] - name[otherComponents_counted_set]]
if name[missing_from_glyph2] begin[:]
name[reporter].fatal assign[=] constant[True]
name[reporter].componentsMissingFromGlyph2 assign[=] call[name[sorted], parameter[call[name[missing_from_glyph2].elements, parameter[]]]]
if compare[call[name[len], parameter[name[self].guidelines]] not_equal[!=] call[name[len], parameter[name[glyph2].guidelines]]] begin[:]
name[reporter].warning assign[=] constant[True]
name[reporter].guidelineCountDifference assign[=] constant[True]
variable[selfGuidelines] assign[=] list[[]]
variable[otherGuidelines] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c993c40>, <ast.Name object at 0x7da20c9905b0>]]] in starred[tuple[[<ast.Tuple object at 0x7da20c992e00>, <ast.Tuple object at 0x7da20c992410>]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c993cd0>, <ast.Name object at 0x7da20c992500>]]] in starred[call[name[enumerate], parameter[name[source].guidelines]]] begin[:]
call[name[names].append, parameter[tuple[[<ast.Attribute object at 0x7da20c9915a0>, <ast.Name object at 0x7da20c993fd0>]]]]
variable[guidelines1] assign[=] call[name[set], parameter[name[selfGuidelines]]]
variable[guidelines2] assign[=] call[name[set], parameter[name[otherGuidelines]]]
if compare[call[name[len], parameter[call[name[guidelines1].difference, parameter[name[guidelines2]]]]] not_equal[!=] constant[0]] begin[:]
name[reporter].warning assign[=] constant[True]
name[reporter].guidelinesMissingFromGlyph2 assign[=] call[name[list], parameter[call[name[guidelines1].difference, parameter[name[guidelines2]]]]]
if compare[call[name[len], parameter[call[name[guidelines2].difference, parameter[name[guidelines1]]]]] not_equal[!=] constant[0]] begin[:]
name[reporter].warning assign[=] constant[True]
name[reporter].guidelinesMissingFromGlyph1 assign[=] call[name[list], parameter[call[name[guidelines2].difference, parameter[name[guidelines1]]]]]
if compare[call[name[len], parameter[name[self].anchors]] not_equal[!=] call[name[len], parameter[name[glyph2].anchors]]] begin[:]
name[reporter].warning assign[=] constant[True]
name[reporter].anchorCountDifference assign[=] constant[True]
variable[anchor_diff] assign[=] list[[]]
variable[selfAnchors] assign[=] <ast.ListComp object at 0x7da20c9919f0>
variable[otherAnchors] assign[=] <ast.ListComp object at 0x7da20c990e50>
for taget[tuple[[<ast.Name object at 0x7da20c6aac80>, <ast.Tuple object at 0x7da20c6ab550>]]] in starred[call[name[enumerate], parameter[call[name[zip_longest], parameter[name[selfAnchors], name[otherAnchors]]]]]] begin[:]
if compare[name[left] not_equal[!=] name[right]] begin[:]
call[name[anchor_diff].append, parameter[tuple[[<ast.Name object at 0x7da20c6a85e0>, <ast.Name object at 0x7da20c6aabf0>, <ast.Name object at 0x7da20c6ab370>]]]]
if name[anchor_diff] begin[:]
name[reporter].warning assign[=] constant[True]
name[reporter].anchorDifferences assign[=] name[anchor_diff]
if <ast.BoolOp object at 0x7da20c6aa770> begin[:]
name[reporter].anchorOrderDifference assign[=] constant[True]
variable[selfAnchors_counted_set] assign[=] call[name[collections].Counter, parameter[name[selfAnchors]]]
variable[otherAnchors_counted_set] assign[=] call[name[collections].Counter, parameter[name[otherAnchors]]]
variable[missing_from_glyph1] assign[=] binary_operation[name[otherAnchors_counted_set] - name[selfAnchors_counted_set]]
if name[missing_from_glyph1] begin[:]
name[reporter].anchorsMissingFromGlyph1 assign[=] call[name[sorted], parameter[call[name[missing_from_glyph1].elements, parameter[]]]]
variable[missing_from_glyph2] assign[=] binary_operation[name[selfAnchors_counted_set] - name[otherAnchors_counted_set]]
if name[missing_from_glyph2] begin[:]
name[reporter].anchorsMissingFromGlyph2 assign[=] call[name[sorted], parameter[call[name[missing_from_glyph2].elements, parameter[]]]] | keyword[def] identifier[_isCompatible] ( identifier[self] , identifier[other] , identifier[reporter] ):
literal[string]
identifier[glyph1] = identifier[self]
identifier[glyph2] = identifier[other]
keyword[if] identifier[len] ( identifier[self] . identifier[contours] )!= identifier[len] ( identifier[glyph2] . identifier[contours] ):
identifier[reporter] . identifier[fatal] = keyword[True]
identifier[reporter] . identifier[contourCountDifference] = keyword[True]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[min] ( identifier[len] ( identifier[glyph1] ), identifier[len] ( identifier[glyph2] ))):
identifier[contour1] = identifier[glyph1] [ identifier[i] ]
identifier[contour2] = identifier[glyph2] [ identifier[i] ]
identifier[self] . identifier[_checkPairs] ( identifier[contour1] , identifier[contour2] , identifier[reporter] , identifier[reporter] . identifier[contours] )
keyword[if] identifier[len] ( identifier[glyph1] . identifier[components] )!= identifier[len] ( identifier[glyph2] . identifier[components] ):
identifier[reporter] . identifier[fatal] = keyword[True]
identifier[reporter] . identifier[componentCountDifference] = keyword[True]
identifier[component_diff] =[]
identifier[selfComponents] =[ identifier[component] . identifier[baseGlyph] keyword[for] identifier[component] keyword[in] identifier[glyph1] . identifier[components] ]
identifier[otherComponents] =[ identifier[component] . identifier[baseGlyph] keyword[for] identifier[component] keyword[in] identifier[glyph2] . identifier[components] ]
keyword[for] identifier[index] ,( identifier[left] , identifier[right] ) keyword[in] identifier[enumerate] (
identifier[zip_longest] ( identifier[selfComponents] , identifier[otherComponents] )
):
keyword[if] identifier[left] != identifier[right] :
identifier[component_diff] . identifier[append] (( identifier[index] , identifier[left] , identifier[right] ))
keyword[if] identifier[component_diff] :
identifier[reporter] . identifier[warning] = keyword[True]
identifier[reporter] . identifier[componentDifferences] = identifier[component_diff]
keyword[if] keyword[not] identifier[reporter] . identifier[componentCountDifference] keyword[and] identifier[set] ( identifier[selfComponents] )== identifier[set] (
identifier[otherComponents]
):
identifier[reporter] . identifier[componentOrderDifference] = keyword[True]
identifier[selfComponents_counted_set] = identifier[collections] . identifier[Counter] ( identifier[selfComponents] )
identifier[otherComponents_counted_set] = identifier[collections] . identifier[Counter] ( identifier[otherComponents] )
identifier[missing_from_glyph1] =(
identifier[otherComponents_counted_set] - identifier[selfComponents_counted_set]
)
keyword[if] identifier[missing_from_glyph1] :
identifier[reporter] . identifier[fatal] = keyword[True]
identifier[reporter] . identifier[componentsMissingFromGlyph1] = identifier[sorted] (
identifier[missing_from_glyph1] . identifier[elements] ()
)
identifier[missing_from_glyph2] =(
identifier[selfComponents_counted_set] - identifier[otherComponents_counted_set]
)
keyword[if] identifier[missing_from_glyph2] :
identifier[reporter] . identifier[fatal] = keyword[True]
identifier[reporter] . identifier[componentsMissingFromGlyph2] = identifier[sorted] (
identifier[missing_from_glyph2] . identifier[elements] ()
)
keyword[if] identifier[len] ( identifier[self] . identifier[guidelines] )!= identifier[len] ( identifier[glyph2] . identifier[guidelines] ):
identifier[reporter] . identifier[warning] = keyword[True]
identifier[reporter] . identifier[guidelineCountDifference] = keyword[True]
identifier[selfGuidelines] =[]
identifier[otherGuidelines] =[]
keyword[for] identifier[source] , identifier[names] keyword[in] (( identifier[self] , identifier[selfGuidelines] ),
( identifier[other] , identifier[otherGuidelines] )):
keyword[for] identifier[i] , identifier[guideline] keyword[in] identifier[enumerate] ( identifier[source] . identifier[guidelines] ):
identifier[names] . identifier[append] (( identifier[guideline] . identifier[name] , identifier[i] ))
identifier[guidelines1] = identifier[set] ( identifier[selfGuidelines] )
identifier[guidelines2] = identifier[set] ( identifier[otherGuidelines] )
keyword[if] identifier[len] ( identifier[guidelines1] . identifier[difference] ( identifier[guidelines2] ))!= literal[int] :
identifier[reporter] . identifier[warning] = keyword[True]
identifier[reporter] . identifier[guidelinesMissingFromGlyph2] = identifier[list] (
identifier[guidelines1] . identifier[difference] ( identifier[guidelines2] ))
keyword[if] identifier[len] ( identifier[guidelines2] . identifier[difference] ( identifier[guidelines1] ))!= literal[int] :
identifier[reporter] . identifier[warning] = keyword[True]
identifier[reporter] . identifier[guidelinesMissingFromGlyph1] = identifier[list] (
identifier[guidelines2] . identifier[difference] ( identifier[guidelines1] ))
keyword[if] identifier[len] ( identifier[self] . identifier[anchors] )!= identifier[len] ( identifier[glyph2] . identifier[anchors] ):
identifier[reporter] . identifier[warning] = keyword[True]
identifier[reporter] . identifier[anchorCountDifference] = keyword[True]
identifier[anchor_diff] =[]
identifier[selfAnchors] =[ identifier[anchor] . identifier[name] keyword[for] identifier[anchor] keyword[in] identifier[glyph1] . identifier[anchors] ]
identifier[otherAnchors] =[ identifier[anchor] . identifier[name] keyword[for] identifier[anchor] keyword[in] identifier[glyph2] . identifier[anchors] ]
keyword[for] identifier[index] ,( identifier[left] , identifier[right] ) keyword[in] identifier[enumerate] ( identifier[zip_longest] ( identifier[selfAnchors] , identifier[otherAnchors] )):
keyword[if] identifier[left] != identifier[right] :
identifier[anchor_diff] . identifier[append] (( identifier[index] , identifier[left] , identifier[right] ))
keyword[if] identifier[anchor_diff] :
identifier[reporter] . identifier[warning] = keyword[True]
identifier[reporter] . identifier[anchorDifferences] = identifier[anchor_diff]
keyword[if] keyword[not] identifier[reporter] . identifier[anchorCountDifference] keyword[and] identifier[set] ( identifier[selfAnchors] )== identifier[set] (
identifier[otherAnchors]
):
identifier[reporter] . identifier[anchorOrderDifference] = keyword[True]
identifier[selfAnchors_counted_set] = identifier[collections] . identifier[Counter] ( identifier[selfAnchors] )
identifier[otherAnchors_counted_set] = identifier[collections] . identifier[Counter] ( identifier[otherAnchors] )
identifier[missing_from_glyph1] = identifier[otherAnchors_counted_set] - identifier[selfAnchors_counted_set]
keyword[if] identifier[missing_from_glyph1] :
identifier[reporter] . identifier[anchorsMissingFromGlyph1] = identifier[sorted] (
identifier[missing_from_glyph1] . identifier[elements] ()
)
identifier[missing_from_glyph2] = identifier[selfAnchors_counted_set] - identifier[otherAnchors_counted_set]
keyword[if] identifier[missing_from_glyph2] :
identifier[reporter] . identifier[anchorsMissingFromGlyph2] = identifier[sorted] (
identifier[missing_from_glyph2] . identifier[elements] ()
) | def _isCompatible(self, other, reporter):
"""
This is the environment implementation of
:meth:`BaseGlyph.isCompatible`.
Subclasses may override this method.
"""
glyph1 = self
glyph2 = other
# contour count
if len(self.contours) != len(glyph2.contours):
reporter.fatal = True
reporter.contourCountDifference = True # depends on [control=['if'], data=[]]
# contour pairs
for i in range(min(len(glyph1), len(glyph2))):
contour1 = glyph1[i]
contour2 = glyph2[i]
self._checkPairs(contour1, contour2, reporter, reporter.contours) # depends on [control=['for'], data=['i']]
# component count
if len(glyph1.components) != len(glyph2.components):
reporter.fatal = True
reporter.componentCountDifference = True # depends on [control=['if'], data=[]]
# component check
component_diff = []
selfComponents = [component.baseGlyph for component in glyph1.components]
otherComponents = [component.baseGlyph for component in glyph2.components]
for (index, (left, right)) in enumerate(zip_longest(selfComponents, otherComponents)):
if left != right:
component_diff.append((index, left, right)) # depends on [control=['if'], data=['left', 'right']] # depends on [control=['for'], data=[]]
if component_diff:
reporter.warning = True
reporter.componentDifferences = component_diff
if not reporter.componentCountDifference and set(selfComponents) == set(otherComponents):
reporter.componentOrderDifference = True # depends on [control=['if'], data=[]]
selfComponents_counted_set = collections.Counter(selfComponents)
otherComponents_counted_set = collections.Counter(otherComponents)
missing_from_glyph1 = otherComponents_counted_set - selfComponents_counted_set
if missing_from_glyph1:
reporter.fatal = True
reporter.componentsMissingFromGlyph1 = sorted(missing_from_glyph1.elements()) # depends on [control=['if'], data=[]]
missing_from_glyph2 = selfComponents_counted_set - otherComponents_counted_set
if missing_from_glyph2:
reporter.fatal = True
reporter.componentsMissingFromGlyph2 = sorted(missing_from_glyph2.elements()) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# guideline count
if len(self.guidelines) != len(glyph2.guidelines):
reporter.warning = True
reporter.guidelineCountDifference = True # depends on [control=['if'], data=[]]
# guideline check
selfGuidelines = []
otherGuidelines = []
for (source, names) in ((self, selfGuidelines), (other, otherGuidelines)):
for (i, guideline) in enumerate(source.guidelines):
names.append((guideline.name, i)) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
guidelines1 = set(selfGuidelines)
guidelines2 = set(otherGuidelines)
if len(guidelines1.difference(guidelines2)) != 0:
reporter.warning = True
reporter.guidelinesMissingFromGlyph2 = list(guidelines1.difference(guidelines2)) # depends on [control=['if'], data=[]]
if len(guidelines2.difference(guidelines1)) != 0:
reporter.warning = True
reporter.guidelinesMissingFromGlyph1 = list(guidelines2.difference(guidelines1)) # depends on [control=['if'], data=[]]
# anchor count
if len(self.anchors) != len(glyph2.anchors):
reporter.warning = True
reporter.anchorCountDifference = True # depends on [control=['if'], data=[]]
# anchor check
anchor_diff = []
selfAnchors = [anchor.name for anchor in glyph1.anchors]
otherAnchors = [anchor.name for anchor in glyph2.anchors]
for (index, (left, right)) in enumerate(zip_longest(selfAnchors, otherAnchors)):
if left != right:
anchor_diff.append((index, left, right)) # depends on [control=['if'], data=['left', 'right']] # depends on [control=['for'], data=[]]
if anchor_diff:
reporter.warning = True
reporter.anchorDifferences = anchor_diff
if not reporter.anchorCountDifference and set(selfAnchors) == set(otherAnchors):
reporter.anchorOrderDifference = True # depends on [control=['if'], data=[]]
selfAnchors_counted_set = collections.Counter(selfAnchors)
otherAnchors_counted_set = collections.Counter(otherAnchors)
missing_from_glyph1 = otherAnchors_counted_set - selfAnchors_counted_set
if missing_from_glyph1:
reporter.anchorsMissingFromGlyph1 = sorted(missing_from_glyph1.elements()) # depends on [control=['if'], data=[]]
missing_from_glyph2 = selfAnchors_counted_set - otherAnchors_counted_set
if missing_from_glyph2:
reporter.anchorsMissingFromGlyph2 = sorted(missing_from_glyph2.elements()) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def read_chd_header(chd_file):
    """
    read the .chd header file created when Vision Research software saves
    the images in a file format other than .cine

    Returns a dict with three writable header structures, filled in the
    order they appear on disk: ``cinefileheader``, ``bitmapinfoheader``
    and ``setup``.
    """
    with open(chd_file, "rb") as chd:
        header = {
            "cinefileheader": cine.CINEFILEHEADER(),
            "bitmapinfoheader": cine.BITMAPINFOHEADER(),
            "setup": cine.SETUP(),
        }
        # readinto() fills each structure in place from the stream, so the
        # iteration order must match the on-disk layout.
        for section in ("cinefileheader", "bitmapinfoheader", "setup"):
            chd.readinto(header[section])
    return header
constant[
read the .chd header file created when Vision Research software saves the images in a file format other than .cine
]
with call[name[open], parameter[name[chd_file], constant[rb]]] begin[:]
variable[header] assign[=] dictionary[[<ast.Constant object at 0x7da1b0505c90>, <ast.Constant object at 0x7da1b0505630>, <ast.Constant object at 0x7da1b0505ab0>], [<ast.Call object at 0x7da1b0506200>, <ast.Call object at 0x7da1b05053c0>, <ast.Call object at 0x7da1b0505d20>]]
call[name[f].readinto, parameter[call[name[header]][constant[cinefileheader]]]]
call[name[f].readinto, parameter[call[name[header]][constant[bitmapinfoheader]]]]
call[name[f].readinto, parameter[call[name[header]][constant[setup]]]]
return[name[header]] | keyword[def] identifier[read_chd_header] ( identifier[chd_file] ):
literal[string]
keyword[with] identifier[open] ( identifier[chd_file] , literal[string] ) keyword[as] identifier[f] :
identifier[header] ={
literal[string] : identifier[cine] . identifier[CINEFILEHEADER] (),
literal[string] : identifier[cine] . identifier[BITMAPINFOHEADER] (),
literal[string] : identifier[cine] . identifier[SETUP] (),
}
identifier[f] . identifier[readinto] ( identifier[header] [ literal[string] ])
identifier[f] . identifier[readinto] ( identifier[header] [ literal[string] ])
identifier[f] . identifier[readinto] ( identifier[header] [ literal[string] ])
keyword[return] identifier[header] | def read_chd_header(chd_file):
"""
read the .chd header file created when Vision Research software saves the images in a file format other than .cine
"""
with open(chd_file, 'rb') as f:
header = {'cinefileheader': cine.CINEFILEHEADER(), 'bitmapinfoheader': cine.BITMAPINFOHEADER(), 'setup': cine.SETUP()}
f.readinto(header['cinefileheader'])
f.readinto(header['bitmapinfoheader'])
f.readinto(header['setup']) # depends on [control=['with'], data=['f']]
return header |
def check(self, action, page=None, lang=None, method=None):
    """Return ``True`` if the current user has permission on the page.

    Superusers are always allowed.  ``'change'`` defers to
    ``has_change_permission(page, lang, method)`` and returns its result
    unchanged; ``'delete'`` and ``'add'`` defer to ``delete_page()`` /
    ``add_page()``; ``'freeze'`` and ``'publish'`` map to the
    ``pages.can_freeze`` / ``pages.can_publish`` permissions.  Any other
    action is denied.
    """
    if self.user.is_superuser:
        return True
    if action == 'change':
        # Pass-through: callers receive whatever has_change_permission returns.
        return self.has_change_permission(page, lang, method)
    if action == 'delete':
        # bool() preserves the original strict True/False contract.
        return bool(self.delete_page())
    if action == 'add':
        return bool(self.add_page())
    if action == 'freeze':
        return bool(self.user.has_perm('pages.can_freeze'))
    if action == 'publish':
        return bool(self.user.has_perm('pages.can_publish'))
    # Unknown actions are denied by default.
    return False
return False | def function[check, parameter[self, action, page, lang, method]]:
constant[Return ``True`` if the current user has permission on the page.]
if name[self].user.is_superuser begin[:]
return[constant[True]]
if compare[name[action] equal[==] constant[change]] begin[:]
return[call[name[self].has_change_permission, parameter[name[page], name[lang], name[method]]]]
if compare[name[action] equal[==] constant[delete]] begin[:]
if <ast.UnaryOp object at 0x7da1b2347f10> begin[:]
return[constant[False]]
return[constant[True]]
if compare[name[action] equal[==] constant[add]] begin[:]
if <ast.UnaryOp object at 0x7da1b2347d90> begin[:]
return[constant[False]]
return[constant[True]]
if compare[name[action] equal[==] constant[freeze]] begin[:]
variable[perm] assign[=] call[name[self].user.has_perm, parameter[constant[pages.can_freeze]]]
if name[perm] begin[:]
return[constant[True]]
return[constant[False]]
if compare[name[action] equal[==] constant[publish]] begin[:]
variable[perm] assign[=] call[name[self].user.has_perm, parameter[constant[pages.can_publish]]]
if name[perm] begin[:]
return[constant[True]]
return[constant[False]]
return[constant[False]] | keyword[def] identifier[check] ( identifier[self] , identifier[action] , identifier[page] = keyword[None] , identifier[lang] = keyword[None] , identifier[method] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[user] . identifier[is_superuser] :
keyword[return] keyword[True]
keyword[if] identifier[action] == literal[string] :
keyword[return] identifier[self] . identifier[has_change_permission] ( identifier[page] , identifier[lang] , identifier[method] )
keyword[if] identifier[action] == literal[string] :
keyword[if] keyword[not] identifier[self] . identifier[delete_page] ():
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[if] identifier[action] == literal[string] :
keyword[if] keyword[not] identifier[self] . identifier[add_page] ():
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[if] identifier[action] == literal[string] :
identifier[perm] = identifier[self] . identifier[user] . identifier[has_perm] ( literal[string] )
keyword[if] identifier[perm] :
keyword[return] keyword[True]
keyword[return] keyword[False]
keyword[if] identifier[action] == literal[string] :
identifier[perm] = identifier[self] . identifier[user] . identifier[has_perm] ( literal[string] )
keyword[if] identifier[perm] :
keyword[return] keyword[True]
keyword[return] keyword[False]
keyword[return] keyword[False] | def check(self, action, page=None, lang=None, method=None):
"""Return ``True`` if the current user has permission on the page."""
if self.user.is_superuser:
return True # depends on [control=['if'], data=[]]
if action == 'change':
return self.has_change_permission(page, lang, method) # depends on [control=['if'], data=[]]
if action == 'delete':
if not self.delete_page():
return False # depends on [control=['if'], data=[]]
return True # depends on [control=['if'], data=[]]
if action == 'add':
if not self.add_page():
return False # depends on [control=['if'], data=[]]
return True # depends on [control=['if'], data=[]]
if action == 'freeze':
perm = self.user.has_perm('pages.can_freeze')
if perm:
return True # depends on [control=['if'], data=[]]
return False # depends on [control=['if'], data=[]]
if action == 'publish':
perm = self.user.has_perm('pages.can_publish')
if perm:
return True # depends on [control=['if'], data=[]]
return False # depends on [control=['if'], data=[]]
return False |
def list_task_definitions(self):
    """
    Return the ARNs of every registered task definition.

    Filtering not implemented
    """
    # Flatten each revision list into a single list of ARNs, preserving
    # the dict's insertion order.
    return [
        task_definition.arn
        for revisions in self.task_definitions.values()
        for task_definition in revisions
    ]
constant[
Filtering not implemented
]
variable[task_arns] assign[=] list[[]]
for taget[name[task_definition_list]] in starred[call[name[self].task_definitions.values, parameter[]]] begin[:]
call[name[task_arns].extend, parameter[<ast.ListComp object at 0x7da18fe91c30>]]
return[name[task_arns]] | keyword[def] identifier[list_task_definitions] ( identifier[self] ):
literal[string]
identifier[task_arns] =[]
keyword[for] identifier[task_definition_list] keyword[in] identifier[self] . identifier[task_definitions] . identifier[values] ():
identifier[task_arns] . identifier[extend] (
[ identifier[task_definition] . identifier[arn] keyword[for] identifier[task_definition] keyword[in] identifier[task_definition_list] ])
keyword[return] identifier[task_arns] | def list_task_definitions(self):
"""
Filtering not implemented
"""
task_arns = []
for task_definition_list in self.task_definitions.values():
task_arns.extend([task_definition.arn for task_definition in task_definition_list]) # depends on [control=['for'], data=['task_definition_list']]
return task_arns |
def parseTextModeTimeStr(timeStr):
    """ Parses the specified SMS text mode time string
    The time stamp format is "yy/MM/dd,hh:mm:ss±zz"
    (yy = year, MM = month, dd = day, hh = hour, mm = minute, ss = second, zz = time zone
    [Note: the unit of time zone is a quarter of an hour])
    :param timeStr: The time string to parse
    :type timeStr: str
    :return: datetime object representing the specified time string
    :rtype: datetime.datetime
    """
    # Split the signed quarter-hour zone suffix off the timestamp proper.
    timestamp, zone = timeStr[:-3], timeStr[-3:]
    # NOTE(review): int() truncates toward zero, so fractional-hour zones
    # (e.g. +22 quarters = +5.5 h) lose their half hour — confirm intended.
    tzOffsetHours = int(int(zone) * 0.25)
    parsed = datetime.strptime(timestamp, '%y/%m/%d,%H:%M:%S')
    return parsed.replace(tzinfo=SimpleOffsetTzInfo(tzOffsetHours))
constant[ Parses the specified SMS text mode time string
The time stamp format is "yy/MM/dd,hh:mm:ss±zz"
(yy = year, MM = month, dd = day, hh = hour, mm = minute, ss = second, zz = time zone
[Note: the unit of time zone is a quarter of an hour])
:param timeStr: The time string to parse
:type timeStr: str
:return: datetime object representing the specified time string
:rtype: datetime.datetime
]
variable[msgTime] assign[=] call[name[timeStr]][<ast.Slice object at 0x7da1b1529720>]
variable[tzOffsetHours] assign[=] call[name[int], parameter[binary_operation[call[name[int], parameter[call[name[timeStr]][<ast.Slice object at 0x7da18f58e5f0>]]] * constant[0.25]]]]
return[call[call[name[datetime].strptime, parameter[name[msgTime], constant[%y/%m/%d,%H:%M:%S]]].replace, parameter[]]] | keyword[def] identifier[parseTextModeTimeStr] ( identifier[timeStr] ):
literal[string]
identifier[msgTime] = identifier[timeStr] [:- literal[int] ]
identifier[tzOffsetHours] = identifier[int] ( identifier[int] ( identifier[timeStr] [- literal[int] :])* literal[int] )
keyword[return] identifier[datetime] . identifier[strptime] ( identifier[msgTime] , literal[string] ). identifier[replace] ( identifier[tzinfo] = identifier[SimpleOffsetTzInfo] ( identifier[tzOffsetHours] )) | def parseTextModeTimeStr(timeStr):
""" Parses the specified SMS text mode time string
The time stamp format is "yy/MM/dd,hh:mm:ss±zz"
(yy = year, MM = month, dd = day, hh = hour, mm = minute, ss = second, zz = time zone
[Note: the unit of time zone is a quarter of an hour])
:param timeStr: The time string to parse
:type timeStr: str
:return: datetime object representing the specified time string
:rtype: datetime.datetime
"""
msgTime = timeStr[:-3]
tzOffsetHours = int(int(timeStr[-3:]) * 0.25)
return datetime.strptime(msgTime, '%y/%m/%d,%H:%M:%S').replace(tzinfo=SimpleOffsetTzInfo(tzOffsetHours)) |
def alias_authorization(self, authorization_id, alias_id):
    """Assign an alias ``Id`` to an ``Authorization``.

    The provider controls the primary ``Id`` of the ``Authorization``;
    the supplied alias behaves as an alternate pointer to it. If the
    alias currently points at a different authorization, it is
    reassigned to the authorization identified by ``authorization_id``.

    arg:    authorization_id (osid.id.Id): the ``Id`` of an
            ``Authorization``
    arg:    alias_id (osid.id.Id): the alias ``Id``
    raise:  AlreadyExists - ``alias_id`` is already assigned
    raise:  NotFound - ``authorization_id`` not found
    raise:  NullArgument - ``authorization_id`` or ``alias_id`` is
            ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Mirrors osid.resource.ResourceAdminSession.alias_resources_template;
    # the shared helper records the alias against the primary Id.
    self._alias_id(primary_id=authorization_id, equivalent_id=alias_id)
constant[Adds an ``Id`` to an ``Authorization`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Authorization`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another authorization. it
is reassigned to the given authorization ``Id``.
arg: authorization_id (osid.id.Id): the ``Id`` of an
``Authorization``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``authorization_id`` not found
raise: NullArgument - ``authorization_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
call[name[self]._alias_id, parameter[]] | keyword[def] identifier[alias_authorization] ( identifier[self] , identifier[authorization_id] , identifier[alias_id] ):
literal[string]
identifier[self] . identifier[_alias_id] ( identifier[primary_id] = identifier[authorization_id] , identifier[equivalent_id] = identifier[alias_id] ) | def alias_authorization(self, authorization_id, alias_id):
"""Adds an ``Id`` to an ``Authorization`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Authorization`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another authorization. it
is reassigned to the given authorization ``Id``.
arg: authorization_id (osid.id.Id): the ``Id`` of an
``Authorization``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``authorization_id`` not found
raise: NullArgument - ``authorization_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.alias_resources_template
self._alias_id(primary_id=authorization_id, equivalent_id=alias_id) |
def update_internal_networks(self, network_uri_list, force=False, timeout=-1):
    """
    Updates internal networks on the logical interconnect.

    Args:
        network_uri_list: List of Ethernet network uris.
        force: When True the operation proceeds even if network
            connectivity problems or resource errors occur. Defaults
            to False.
        timeout: Timeout in seconds; waits for task completion by
            default. Timing out only stops the client from waiting --
            the OneView operation itself is not aborted.

    Returns:
        dict: Logical Interconnect.
    """
    endpoint = self.data["uri"] + "/internalNetworks"
    return self._helper.update(network_uri_list, uri=endpoint,
                               force=force, timeout=timeout)
constant[
Updates internal networks on the logical interconnect.
Args:
network_uri_list: List of Ethernet network uris.
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Logical Interconnect.
]
variable[uri] assign[=] call[constant[{}/internalNetworks].format, parameter[call[name[self].data][constant[uri]]]]
return[call[name[self]._helper.update, parameter[name[network_uri_list]]]] | keyword[def] identifier[update_internal_networks] ( identifier[self] , identifier[network_uri_list] , identifier[force] = keyword[False] , identifier[timeout] =- literal[int] ):
literal[string]
identifier[uri] = literal[string] . identifier[format] ( identifier[self] . identifier[data] [ literal[string] ])
keyword[return] identifier[self] . identifier[_helper] . identifier[update] ( identifier[network_uri_list] , identifier[uri] = identifier[uri] , identifier[force] = identifier[force] , identifier[timeout] = identifier[timeout] ) | def update_internal_networks(self, network_uri_list, force=False, timeout=-1):
"""
Updates internal networks on the logical interconnect.
Args:
network_uri_list: List of Ethernet network uris.
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Logical Interconnect.
"""
uri = '{}/internalNetworks'.format(self.data['uri'])
return self._helper.update(network_uri_list, uri=uri, force=force, timeout=timeout) |
def MultiOpenOrdered(self, urns, **kwargs):
  """Opens many URNs and returns file handles in the requested order.

  `MultiOpen` may yield handles in arbitrary order, which is usually
  fine and more efficient. When the caller needs the results aligned
  with the input list, this method should be used instead.

  Args:
    urns: A list of URNs to open.
    **kwargs: Same keyword arguments as in `MultiOpen`.

  Returns:
    A list of file-like objects corresponding to the specified URNs.

  Raises:
    IOError: If one of the specified URNs does not correspond to the AFF4
      object.
  """
  precondition.AssertIterableType(urns, rdfvalue.RDFURN)

  filedescs_by_urn = {fd.urn: fd for fd in self.MultiOpen(urns, **kwargs)}

  ordered = []
  for urn in urns:
    if urn not in filedescs_by_urn:
      raise IOError("No associated AFF4 object for `%s`" % urn)
    ordered.append(filedescs_by_urn[urn])
  return ordered
constant[Opens many URNs and returns handles in the same order.
`MultiOpen` can return file handles in arbitrary order. This makes it more
efficient and in most cases the order does not matter. However, there are
cases where order is important and this function should be used instead.
Args:
urns: A list of URNs to open.
**kwargs: Same keyword arguments as in `MultiOpen`.
Returns:
A list of file-like objects corresponding to the specified URNs.
Raises:
IOError: If one of the specified URNs does not correspond to the AFF4
object.
]
call[name[precondition].AssertIterableType, parameter[name[urns], name[rdfvalue].RDFURN]]
variable[urn_filedescs] assign[=] dictionary[[], []]
for taget[name[filedesc]] in starred[call[name[self].MultiOpen, parameter[name[urns]]]] begin[:]
call[name[urn_filedescs]][name[filedesc].urn] assign[=] name[filedesc]
variable[filedescs] assign[=] list[[]]
for taget[name[urn]] in starred[name[urns]] begin[:]
<ast.Try object at 0x7da18f58fd30>
return[name[filedescs]] | keyword[def] identifier[MultiOpenOrdered] ( identifier[self] , identifier[urns] ,** identifier[kwargs] ):
literal[string]
identifier[precondition] . identifier[AssertIterableType] ( identifier[urns] , identifier[rdfvalue] . identifier[RDFURN] )
identifier[urn_filedescs] ={}
keyword[for] identifier[filedesc] keyword[in] identifier[self] . identifier[MultiOpen] ( identifier[urns] ,** identifier[kwargs] ):
identifier[urn_filedescs] [ identifier[filedesc] . identifier[urn] ]= identifier[filedesc]
identifier[filedescs] =[]
keyword[for] identifier[urn] keyword[in] identifier[urns] :
keyword[try] :
identifier[filedescs] . identifier[append] ( identifier[urn_filedescs] [ identifier[urn] ])
keyword[except] identifier[KeyError] :
keyword[raise] identifier[IOError] ( literal[string] % identifier[urn] )
keyword[return] identifier[filedescs] | def MultiOpenOrdered(self, urns, **kwargs):
"""Opens many URNs and returns handles in the same order.
`MultiOpen` can return file handles in arbitrary order. This makes it more
efficient and in most cases the order does not matter. However, there are
cases where order is important and this function should be used instead.
Args:
urns: A list of URNs to open.
**kwargs: Same keyword arguments as in `MultiOpen`.
Returns:
A list of file-like objects corresponding to the specified URNs.
Raises:
IOError: If one of the specified URNs does not correspond to the AFF4
object.
"""
precondition.AssertIterableType(urns, rdfvalue.RDFURN)
urn_filedescs = {}
for filedesc in self.MultiOpen(urns, **kwargs):
urn_filedescs[filedesc.urn] = filedesc # depends on [control=['for'], data=['filedesc']]
filedescs = []
for urn in urns:
try:
filedescs.append(urn_filedescs[urn]) # depends on [control=['try'], data=[]]
except KeyError:
raise IOError('No associated AFF4 object for `%s`' % urn) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['urn']]
return filedescs |
def schedule_snapshot(self, format):
    """
    Tell the canvas to perform a snapshot when it's finished rendering.

    The output filename is derived from the running script's path
    (same basename, new extension); "output.<format>" is used when no
    script path is available.

    :param format: file extension / output format, e.g. "png".
    :return: None; the snapshot closure is queued on
        ``self.scheduled_snapshots``.
    """
    script_path = self.bot._namespace['__file__']
    if script_path:
        base, _ext = os.path.splitext(script_path)
        target = base + '.' + format
    else:
        target = 'output.' + format
    closure = self.bot.canvas.output_closure(target, self.bot._frame)
    self.scheduled_snapshots.append(closure)
constant[
Tell the canvas to perform a snapshot when it's finished rendering
:param format:
:return:
]
variable[bot] assign[=] name[self].bot
variable[canvas] assign[=] name[self].bot.canvas
variable[script] assign[=] call[name[bot]._namespace][constant[__file__]]
if name[script] begin[:]
variable[filename] assign[=] binary_operation[binary_operation[call[call[name[os].path.splitext, parameter[name[script]]]][constant[0]] + constant[.]] + name[format]]
variable[f] assign[=] call[name[canvas].output_closure, parameter[name[filename], name[self].bot._frame]]
call[name[self].scheduled_snapshots.append, parameter[name[f]]] | keyword[def] identifier[schedule_snapshot] ( identifier[self] , identifier[format] ):
literal[string]
identifier[bot] = identifier[self] . identifier[bot]
identifier[canvas] = identifier[self] . identifier[bot] . identifier[canvas]
identifier[script] = identifier[bot] . identifier[_namespace] [ literal[string] ]
keyword[if] identifier[script] :
identifier[filename] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[script] )[ literal[int] ]+ literal[string] + identifier[format]
keyword[else] :
identifier[filename] = literal[string] + identifier[format]
identifier[f] = identifier[canvas] . identifier[output_closure] ( identifier[filename] , identifier[self] . identifier[bot] . identifier[_frame] )
identifier[self] . identifier[scheduled_snapshots] . identifier[append] ( identifier[f] ) | def schedule_snapshot(self, format):
"""
Tell the canvas to perform a snapshot when it's finished rendering
:param format:
:return:
"""
bot = self.bot
canvas = self.bot.canvas
script = bot._namespace['__file__']
if script:
filename = os.path.splitext(script)[0] + '.' + format # depends on [control=['if'], data=[]]
else:
filename = 'output.' + format
f = canvas.output_closure(filename, self.bot._frame)
self.scheduled_snapshots.append(f) |
def mouseMoveEvent(self, event):
    """ Handle the mouse move event for a drag operation.
    """
    # NOTE: drag-start detection (distance threshold from a recorded
    # drag origin) has been disabled here; the event is simply handed
    # to the wrapped widget's base-class handler.
    w = self.widget
    type(w).mouseMoveEvent(w, event)
constant[ Handle the mouse move event for a drag operation.
]
variable[widget] assign[=] name[self].widget
call[call[name[type], parameter[name[widget]]].mouseMoveEvent, parameter[name[widget], name[event]]] | keyword[def] identifier[mouseMoveEvent] ( identifier[self] , identifier[event] ):
literal[string]
identifier[widget] = identifier[self] . identifier[widget]
identifier[type] ( identifier[widget] ). identifier[mouseMoveEvent] ( identifier[widget] , identifier[event] ) | def mouseMoveEvent(self, event):
""" Handle the mouse move event for a drag operation.
"""
#if event.buttons() & Qt.LeftButton and self._drag_origin is not None:
#dist = (event.pos() - self._drag_origin).manhattanLength()
#if dist >= QApplication.startDragDistance():
#self.do_drag(event.widget())
#self._drag_origin = None
#return # Don't returns
widget = self.widget
type(widget).mouseMoveEvent(widget, event) |
def partition_zero(max_range, max_sum, out = None, n_part = None):
    '''
    Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
    Returns a 2d-array with on the rows all possible partitions of the ranges `0,...,max_range[j]` that add up to at most `max_sum`.
    Code due to ptrj, see http://stackoverflow.com/a/36563744/1479342.
    Parameters
    ----------
    max_range : array or list of ints
        Gives the ranges for each element in the output array. Element `j` has range `np.arange(max_range[j]+1)`.
    max_sum : int
        The maximum sum for each partition in the output array.
    out : array, optional
        Internal recursion parameter: pre-allocated slice of the result
        that this call fills in place. Callers should leave it None.
    n_part : array, optional
        Internal recursion parameter: partition counts for each suffix of
        `max_range`, as produced by `number_of_partitions`. Leave None.
    Returns
    -------
    out : array
        2d array with all possible partitions of the ranges `0,...,max_range[j]` summing up to at most `max_sum`.
    Example
    -------
    >>> max_range=np.array([1,3,2])
    >>> max_sum = 3
    >>> partition_zero(max_range,max_sum)
    array([[0, 0, 0],
       [0, 0, 1],
       [0, 0, 2],
       [0, 1, 0],
       [0, 1, 1],
       [0, 1, 2],
       [0, 2, 0],
       [0, 2, 1],
       [0, 3, 0],
       [1, 0, 0],
       [1, 0, 1],
       [1, 0, 2],
       [1, 1, 0],
       [1, 1, 1],
       [1, 2, 0]])
    '''
    if out is None:
        # Top-level (non-recursive) call: normalise the input and
        # pre-allocate the full output using per-suffix partition counts.
        max_range = np.asarray(max_range, dtype = int).ravel()
        n_part = number_of_partitions(max_range, max_sum)  # n_part[k]: row count for the suffix max_range[k:]
        out = np.zeros(shape = (n_part[0], max_range.size), dtype = int)
    if(max_range.size == 1):
        # Base case: a single column just enumerates 0..min(max_range[0], max_sum).
        out[:] = np.arange(min(max_range[0],max_sum) + 1, dtype = int).reshape(-1,1)
        return out
    # Recurse on the tail columns; P is a view into the top rows of out[:, 1:].
    P = partition_zero(max_range[1:], max_sum, out=out[:n_part[1],1:], n_part = n_part[1:])
    # S[k]: how much headroom tail-partition k leaves for the first column,
    # capped by that column's own range.
    S = np.minimum(max_sum - P.sum(axis = 1), max_range[0])
    offset, sz = 0, S.size
    # Rows with first column == 0 are exactly the tail partitions (already in place).
    out[:sz,0] = 0
    # For each first-column value i, copy the tail partitions that still have
    # room (S > 0) into the next block of rows, then use up one unit of headroom.
    for i in range(1, max_range[0]+1):
        ind, = np.nonzero(S)
        offset, sz = offset + sz, ind.size
        out[offset:offset+sz, 0] = i
        out[offset:offset+sz, 1:] = P[ind]
        S[ind] -= 1
    return out
constant[
Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `0,...,max_range[j]` that add up to at most `max_sum`.
Code due to ptrj, see http://stackoverflow.com/a/36563744/1479342.
Parameters
----------
max_range : array or list of ints
Gives the ranges for each element in the output array. Element `j` has range `np.arange(max_range[j]+1)`.
max_sum : int
The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `0,...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> max_range=np.array([1,3,2])
>>> max_sum = 3
>>> partition_zero(max_range,max_sum)
array([[0, 0, 0],
[0, 0, 1],
[0, 0, 2],
[0, 1, 0],
[0, 1, 1],
[0, 1, 2],
[0, 2, 0],
[0, 2, 1],
[0, 3, 0],
[1, 0, 0],
[1, 0, 1],
[1, 0, 2],
[1, 1, 0],
[1, 1, 1],
[1, 2, 0]])
]
if compare[name[out] is constant[None]] begin[:]
variable[max_range] assign[=] call[call[name[np].asarray, parameter[name[max_range]]].ravel, parameter[]]
variable[n_part] assign[=] call[name[number_of_partitions], parameter[name[max_range], name[max_sum]]]
variable[out] assign[=] call[name[np].zeros, parameter[]]
if compare[name[max_range].size equal[==] constant[1]] begin[:]
call[name[out]][<ast.Slice object at 0x7da204347820>] assign[=] call[call[name[np].arange, parameter[binary_operation[call[name[min], parameter[call[name[max_range]][constant[0]], name[max_sum]]] + constant[1]]]].reshape, parameter[<ast.UnaryOp object at 0x7da204346b00>, constant[1]]]
return[name[out]]
variable[P] assign[=] call[name[partition_zero], parameter[call[name[max_range]][<ast.Slice object at 0x7da18fe921a0>], name[max_sum]]]
variable[S] assign[=] call[name[np].minimum, parameter[binary_operation[name[max_sum] - call[name[P].sum, parameter[]]], call[name[max_range]][constant[0]]]]
<ast.Tuple object at 0x7da18fe93400> assign[=] tuple[[<ast.Constant object at 0x7da18fe90a90>, <ast.Attribute object at 0x7da18fe936d0>]]
call[name[out]][tuple[[<ast.Slice object at 0x7da18fe93f10>, <ast.Constant object at 0x7da18fe91240>]]] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], binary_operation[call[name[max_range]][constant[0]] + constant[1]]]]] begin[:]
<ast.Tuple object at 0x7da18fe912d0> assign[=] call[name[np].nonzero, parameter[name[S]]]
<ast.Tuple object at 0x7da18fe92620> assign[=] tuple[[<ast.BinOp object at 0x7da18fe93820>, <ast.Attribute object at 0x7da18fe906a0>]]
call[name[out]][tuple[[<ast.Slice object at 0x7da18fe92290>, <ast.Constant object at 0x7da18fe90a60>]]] assign[=] name[i]
call[name[out]][tuple[[<ast.Slice object at 0x7da18fe92b00>, <ast.Slice object at 0x7da18fe93700>]]] assign[=] call[name[P]][name[ind]]
<ast.AugAssign object at 0x7da18fe93e50>
return[name[out]] | keyword[def] identifier[partition_zero] ( identifier[max_range] , identifier[max_sum] , identifier[out] = keyword[None] , identifier[n_part] = keyword[None] ):
literal[string]
keyword[if] identifier[out] keyword[is] keyword[None] :
identifier[max_range] = identifier[np] . identifier[asarray] ( identifier[max_range] , identifier[dtype] = identifier[int] ). identifier[ravel] ()
identifier[n_part] = identifier[number_of_partitions] ( identifier[max_range] , identifier[max_sum] )
identifier[out] = identifier[np] . identifier[zeros] ( identifier[shape] =( identifier[n_part] [ literal[int] ], identifier[max_range] . identifier[size] ), identifier[dtype] = identifier[int] )
keyword[if] ( identifier[max_range] . identifier[size] == literal[int] ):
identifier[out] [:]= identifier[np] . identifier[arange] ( identifier[min] ( identifier[max_range] [ literal[int] ], identifier[max_sum] )+ literal[int] , identifier[dtype] = identifier[int] ). identifier[reshape] (- literal[int] , literal[int] )
keyword[return] identifier[out]
identifier[P] = identifier[partition_zero] ( identifier[max_range] [ literal[int] :], identifier[max_sum] , identifier[out] = identifier[out] [: identifier[n_part] [ literal[int] ], literal[int] :], identifier[n_part] = identifier[n_part] [ literal[int] :])
identifier[S] = identifier[np] . identifier[minimum] ( identifier[max_sum] - identifier[P] . identifier[sum] ( identifier[axis] = literal[int] ), identifier[max_range] [ literal[int] ])
identifier[offset] , identifier[sz] = literal[int] , identifier[S] . identifier[size]
identifier[out] [: identifier[sz] , literal[int] ]= literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[max_range] [ literal[int] ]+ literal[int] ):
identifier[ind] ,= identifier[np] . identifier[nonzero] ( identifier[S] )
identifier[offset] , identifier[sz] = identifier[offset] + identifier[sz] , identifier[ind] . identifier[size]
identifier[out] [ identifier[offset] : identifier[offset] + identifier[sz] , literal[int] ]= identifier[i]
identifier[out] [ identifier[offset] : identifier[offset] + identifier[sz] , literal[int] :]= identifier[P] [ identifier[ind] ]
identifier[S] [ identifier[ind] ]-= literal[int]
keyword[return] identifier[out] | def partition_zero(max_range, max_sum, out=None, n_part=None):
"""
Function that can be helpful for obtaining the state space of a discrete Markov chain or Markov decision processes.
Returns a 2d-array with on the rows all possible partitions of the ranges `0,...,max_range[j]` that add up to at most `max_sum`.
Code due to ptrj, see http://stackoverflow.com/a/36563744/1479342.
Parameters
----------
max_range : array or list of ints
Gives the ranges for each element in the output array. Element `j` has range `np.arange(max_range[j]+1)`.
max_sum : int
The maximum sum for each partition in the output array.
Returns
-------
out : array
2d array with all possible partitions of the ranges `0,...,max_range[j]` summing up to at most `max_sum`.
Example
-------
>>> max_range=np.array([1,3,2])
>>> max_sum = 3
>>> partition_zero(max_range,max_sum)
array([[0, 0, 0],
[0, 0, 1],
[0, 0, 2],
[0, 1, 0],
[0, 1, 1],
[0, 1, 2],
[0, 2, 0],
[0, 2, 1],
[0, 3, 0],
[1, 0, 0],
[1, 0, 1],
[1, 0, 2],
[1, 1, 0],
[1, 1, 1],
[1, 2, 0]])
"""
if out is None:
max_range = np.asarray(max_range, dtype=int).ravel()
n_part = number_of_partitions(max_range, max_sum)
out = np.zeros(shape=(n_part[0], max_range.size), dtype=int) # depends on [control=['if'], data=['out']]
if max_range.size == 1:
out[:] = np.arange(min(max_range[0], max_sum) + 1, dtype=int).reshape(-1, 1)
return out # depends on [control=['if'], data=[]]
P = partition_zero(max_range[1:], max_sum, out=out[:n_part[1], 1:], n_part=n_part[1:])
S = np.minimum(max_sum - P.sum(axis=1), max_range[0])
(offset, sz) = (0, S.size)
out[:sz, 0] = 0
for i in range(1, max_range[0] + 1):
(ind,) = np.nonzero(S)
(offset, sz) = (offset + sz, ind.size)
out[offset:offset + sz, 0] = i
out[offset:offset + sz, 1:] = P[ind]
S[ind] -= 1 # depends on [control=['for'], data=['i']]
return out |
def download(date_array, tag, sat_id, data_path, user=None, password=None):
    """
    Download SuperDARN data from Virginia Tech organized for loading by pysat.

    Files are fetched over SFTP as ``YYYYMMDD.<tag>.grdex.bz2``,
    decompressed into ``data_path``, and the compressed copies removed.

    Parameters
    ----------
    date_array : iterable of datetime-like
        Dates for which grdex files are downloaded.
    tag : str
        Data product tag; selects the remote directory and file suffix.
    sat_id : str
        Satellite ID (unused here; kept for pysat's download interface).
    data_path : str
        Local directory in which the decompressed files are stored.
    user : str or NoneType
        SFTP username; defaults to the DBREADUSER environment variable.
    password : str or NoneType
        SFTP password; defaults to the DBREADPASS environment variable.
    """
    import sys
    import os
    import bz2
    import pysftp

    if user is None:
        user = os.environ['DBREADUSER']
    if password is None:
        password = os.environ['DBREADPASS']

    with pysftp.Connection(
            os.environ['VTDB'],
            username=user,
            password=password) as sftp:
        for date in date_array:
            remote_dir = '/data/' + date.strftime("%Y") + '/grdex/' + tag + '/'
            fname = date.strftime("%Y%m%d") + '.' + tag + '.grdex'
            local_fname = fname + '.bz2'
            saved_fname = os.path.join(data_path, local_fname)
            full_fname = os.path.join(data_path, fname)
            try:
                print('Downloading file for ' + date.strftime('%D'))
                sys.stdout.flush()
                sftp.get(remote_dir + local_fname, saved_fname)
                # Decompress with the stdlib rather than shelling out to
                # bunzip2/rm via os.system: portable, and failures raise
                # instead of being silently ignored.
                with bz2.BZ2File(saved_fname, 'rb') as src, \
                        open(full_fname, 'wb') as dst:
                    dst.write(src.read())
                os.remove(saved_fname)
            except IOError:
                # Missing remote file (or failed transfer) is expected for
                # some dates; report and continue with the next date.
                print('File not available for ' + date.strftime('%D'))
    return
constant[
Download SuperDARN data from Virginia Tech organized for loading by pysat.
]
import module[sys]
import module[os]
import module[pysftp]
import module[davitpy]
if compare[name[user] is constant[None]] begin[:]
variable[user] assign[=] call[name[os].environ][constant[DBREADUSER]]
if compare[name[password] is constant[None]] begin[:]
variable[password] assign[=] call[name[os].environ][constant[DBREADPASS]]
with call[name[pysftp].Connection, parameter[call[name[os].environ][constant[VTDB]]]] begin[:]
for taget[name[date]] in starred[name[date_array]] begin[:]
variable[myDir] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[/data/] + call[name[date].strftime, parameter[constant[%Y]]]] + constant[/grdex/]] + name[tag]] + constant[/]]
variable[fname] assign[=] binary_operation[binary_operation[binary_operation[call[name[date].strftime, parameter[constant[%Y%m%d]]] + constant[.]] + name[tag]] + constant[.grdex]]
variable[local_fname] assign[=] binary_operation[name[fname] + constant[.bz2]]
variable[saved_fname] assign[=] call[name[os].path.join, parameter[name[data_path], name[local_fname]]]
variable[full_fname] assign[=] call[name[os].path.join, parameter[name[data_path], name[fname]]]
<ast.Try object at 0x7da18f00ed70>
return[None] | keyword[def] identifier[download] ( identifier[date_array] , identifier[tag] , identifier[sat_id] , identifier[data_path] , identifier[user] = keyword[None] , identifier[password] = keyword[None] ):
literal[string]
keyword[import] identifier[sys]
keyword[import] identifier[os]
keyword[import] identifier[pysftp]
keyword[import] identifier[davitpy]
keyword[if] identifier[user] keyword[is] keyword[None] :
identifier[user] = identifier[os] . identifier[environ] [ literal[string] ]
keyword[if] identifier[password] keyword[is] keyword[None] :
identifier[password] = identifier[os] . identifier[environ] [ literal[string] ]
keyword[with] identifier[pysftp] . identifier[Connection] (
identifier[os] . identifier[environ] [ literal[string] ],
identifier[username] = identifier[user] ,
identifier[password] = identifier[password] ) keyword[as] identifier[sftp] :
keyword[for] identifier[date] keyword[in] identifier[date_array] :
identifier[myDir] = literal[string] + identifier[date] . identifier[strftime] ( literal[string] )+ literal[string] + identifier[tag] + literal[string]
identifier[fname] = identifier[date] . identifier[strftime] ( literal[string] )+ literal[string] + identifier[tag] + literal[string]
identifier[local_fname] = identifier[fname] + literal[string]
identifier[saved_fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[local_fname] )
identifier[full_fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[fname] )
keyword[try] :
identifier[print] ( literal[string] + identifier[date] . identifier[strftime] ( literal[string] ))
identifier[sys] . identifier[stdout] . identifier[flush] ()
identifier[sftp] . identifier[get] ( identifier[myDir] + identifier[local_fname] , identifier[saved_fname] )
identifier[os] . identifier[system] ( literal[string] + identifier[saved_fname] + literal[string] + identifier[full_fname] )
identifier[os] . identifier[system] ( literal[string] + identifier[saved_fname] )
keyword[except] identifier[IOError] :
identifier[print] ( literal[string] + identifier[date] . identifier[strftime] ( literal[string] ))
keyword[return] | def download(date_array, tag, sat_id, data_path, user=None, password=None):
"""
Download SuperDARN data from Virginia Tech organized for loading by pysat.
"""
import sys
import os
import pysftp
import davitpy
if user is None:
user = os.environ['DBREADUSER'] # depends on [control=['if'], data=['user']]
if password is None:
password = os.environ['DBREADPASS'] # depends on [control=['if'], data=['password']]
with pysftp.Connection(os.environ['VTDB'], username=user, password=password) as sftp:
for date in date_array:
myDir = '/data/' + date.strftime('%Y') + '/grdex/' + tag + '/'
fname = date.strftime('%Y%m%d') + '.' + tag + '.grdex'
local_fname = fname + '.bz2'
saved_fname = os.path.join(data_path, local_fname)
full_fname = os.path.join(data_path, fname)
try:
print('Downloading file for ' + date.strftime('%D'))
sys.stdout.flush()
sftp.get(myDir + local_fname, saved_fname)
os.system('bunzip2 -c ' + saved_fname + ' > ' + full_fname)
os.system('rm ' + saved_fname) # depends on [control=['try'], data=[]]
except IOError:
print('File not available for ' + date.strftime('%D')) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['date']] # depends on [control=['with'], data=['sftp']]
return |
def vxi_command_query(library, session, mode, command):
    """Sends the device a miscellaneous command or query and/or retrieves the response to a previous query.
    Corresponds to viVxiCommandQuery function of the VISA library.
    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param mode: Specifies whether to issue a command and/or retrieve a response. (Constants.VXI_CMD*, .VXI_RESP*)
    :param command: The miscellaneous command to send.
    :return: The response retrieved from the device, return value of the library call.
    :rtype: int, :class:`pyvisa.constants.StatusCode`
    """
    # The C function writes the device response through an out-parameter.
    resp = ViUInt32()
    status = library.viVxiCommandQuery(session, mode, command, byref(resp))
    return resp.value, status
constant[Sends the device a miscellaneous command or query and/or retrieves the response to a previous query.
Corresponds to viVxiCommandQuery function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param mode: Specifies whether to issue a command and/or retrieve a response. (Constants.VXI_CMD*, .VXI_RESP*)
:param command: The miscellaneous command to send.
:return: The response retrieved from the device, return value of the library call.
:rtype: int, :class:`pyvisa.constants.StatusCode`
]
variable[response] assign[=] call[name[ViUInt32], parameter[]]
variable[ret] assign[=] call[name[library].viVxiCommandQuery, parameter[name[session], name[mode], name[command], call[name[byref], parameter[name[response]]]]]
return[tuple[[<ast.Attribute object at 0x7da2047e9ae0>, <ast.Name object at 0x7da2047ea170>]]] | keyword[def] identifier[vxi_command_query] ( identifier[library] , identifier[session] , identifier[mode] , identifier[command] ):
literal[string]
identifier[response] = identifier[ViUInt32] ()
identifier[ret] = identifier[library] . identifier[viVxiCommandQuery] ( identifier[session] , identifier[mode] , identifier[command] , identifier[byref] ( identifier[response] ))
keyword[return] identifier[response] . identifier[value] , identifier[ret] | def vxi_command_query(library, session, mode, command):
"""Sends the device a miscellaneous command or query and/or retrieves the response to a previous query.
Corresponds to viVxiCommandQuery function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param mode: Specifies whether to issue a command and/or retrieve a response. (Constants.VXI_CMD*, .VXI_RESP*)
:param command: The miscellaneous command to send.
:return: The response retrieved from the device, return value of the library call.
:rtype: int, :class:`pyvisa.constants.StatusCode`
"""
response = ViUInt32()
ret = library.viVxiCommandQuery(session, mode, command, byref(response))
return (response.value, ret) |
def createReport(tm, options, sequenceString, numSegments, numSynapses):
"""
Create CSV file with detailed trace of predictions, missed predictions,
accuracy, segment/synapse growth, etc.
"""
pac = tm.mmGetTracePredictedActiveColumns()
pic = tm.mmGetTracePredictedInactiveColumns()
upac = tm.mmGetTraceUnpredictedActiveColumns()
resultsFilename = os.path.join("results", options.name+"_"+str(int(100*options.noise))+".csv")
with open(resultsFilename,"wb") as resultsFile:
csvWriter = csv.writer(resultsFile)
accuracies = numpy.zeros(len(pac.data))
smoothedAccuracies = []
am = 0
csvWriter.writerow(["time", "element", "pac", "pic", "upac", "a",
"am", "accuracy", "sum", "nSegs", "nSyns"])
for i,j in enumerate(pac.data):
if i>0:
# Compute instantaneous and average accuracy.
a = computePredictionAccuracy(len(j), len(pic.data[i]))
# Smooth the curve to get averaged results for the paper.
am = 0.99*am + 0.01*a
accuracies[i] = am
i0 = max(0, i-60+1)
accuracy = numpy.mean(accuracies[i0:i+1])
smoothedAccuracies.append(accuracy)
row=[i, sequenceString[i], len(j), len(pic.data[i]),
len(upac.data[i]), a, am,
accuracy,
numpy.sum(accuracies[i0:i+1]),
numSegments[i], numSynapses[i]]
csvWriter.writerow(row)
return smoothedAccuracies | def function[createReport, parameter[tm, options, sequenceString, numSegments, numSynapses]]:
constant[
Create CSV file with detailed trace of predictions, missed predictions,
accuracy, segment/synapse growth, etc.
]
variable[pac] assign[=] call[name[tm].mmGetTracePredictedActiveColumns, parameter[]]
variable[pic] assign[=] call[name[tm].mmGetTracePredictedInactiveColumns, parameter[]]
variable[upac] assign[=] call[name[tm].mmGetTraceUnpredictedActiveColumns, parameter[]]
variable[resultsFilename] assign[=] call[name[os].path.join, parameter[constant[results], binary_operation[binary_operation[binary_operation[name[options].name + constant[_]] + call[name[str], parameter[call[name[int], parameter[binary_operation[constant[100] * name[options].noise]]]]]] + constant[.csv]]]]
with call[name[open], parameter[name[resultsFilename], constant[wb]]] begin[:]
variable[csvWriter] assign[=] call[name[csv].writer, parameter[name[resultsFile]]]
variable[accuracies] assign[=] call[name[numpy].zeros, parameter[call[name[len], parameter[name[pac].data]]]]
variable[smoothedAccuracies] assign[=] list[[]]
variable[am] assign[=] constant[0]
call[name[csvWriter].writerow, parameter[list[[<ast.Constant object at 0x7da1b08bb430>, <ast.Constant object at 0x7da1b08bbfa0>, <ast.Constant object at 0x7da1b08b99c0>, <ast.Constant object at 0x7da1b08bbd90>, <ast.Constant object at 0x7da1b08bbb50>, <ast.Constant object at 0x7da1b08ba440>, <ast.Constant object at 0x7da1b08bab00>, <ast.Constant object at 0x7da1b08b8bb0>, <ast.Constant object at 0x7da1b08bbd30>, <ast.Constant object at 0x7da1b08bbc40>, <ast.Constant object at 0x7da1b08bbf40>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b08bb580>, <ast.Name object at 0x7da1b08bb940>]]] in starred[call[name[enumerate], parameter[name[pac].data]]] begin[:]
if compare[name[i] greater[>] constant[0]] begin[:]
variable[a] assign[=] call[name[computePredictionAccuracy], parameter[call[name[len], parameter[name[j]]], call[name[len], parameter[call[name[pic].data][name[i]]]]]]
variable[am] assign[=] binary_operation[binary_operation[constant[0.99] * name[am]] + binary_operation[constant[0.01] * name[a]]]
call[name[accuracies]][name[i]] assign[=] name[am]
variable[i0] assign[=] call[name[max], parameter[constant[0], binary_operation[binary_operation[name[i] - constant[60]] + constant[1]]]]
variable[accuracy] assign[=] call[name[numpy].mean, parameter[call[name[accuracies]][<ast.Slice object at 0x7da1b09254e0>]]]
call[name[smoothedAccuracies].append, parameter[name[accuracy]]]
variable[row] assign[=] list[[<ast.Name object at 0x7da1b0926ef0>, <ast.Subscript object at 0x7da1b0925630>, <ast.Call object at 0x7da1b0926080>, <ast.Call object at 0x7da1b0925960>, <ast.Call object at 0x7da1b09253c0>, <ast.Name object at 0x7da1b09251e0>, <ast.Name object at 0x7da1b08643d0>, <ast.Name object at 0x7da1b0865a20>, <ast.Call object at 0x7da1b0864820>, <ast.Subscript object at 0x7da1b08642e0>, <ast.Subscript object at 0x7da1b0865780>]]
call[name[csvWriter].writerow, parameter[name[row]]]
return[name[smoothedAccuracies]] | keyword[def] identifier[createReport] ( identifier[tm] , identifier[options] , identifier[sequenceString] , identifier[numSegments] , identifier[numSynapses] ):
literal[string]
identifier[pac] = identifier[tm] . identifier[mmGetTracePredictedActiveColumns] ()
identifier[pic] = identifier[tm] . identifier[mmGetTracePredictedInactiveColumns] ()
identifier[upac] = identifier[tm] . identifier[mmGetTraceUnpredictedActiveColumns] ()
identifier[resultsFilename] = identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[options] . identifier[name] + literal[string] + identifier[str] ( identifier[int] ( literal[int] * identifier[options] . identifier[noise] ))+ literal[string] )
keyword[with] identifier[open] ( identifier[resultsFilename] , literal[string] ) keyword[as] identifier[resultsFile] :
identifier[csvWriter] = identifier[csv] . identifier[writer] ( identifier[resultsFile] )
identifier[accuracies] = identifier[numpy] . identifier[zeros] ( identifier[len] ( identifier[pac] . identifier[data] ))
identifier[smoothedAccuracies] =[]
identifier[am] = literal[int]
identifier[csvWriter] . identifier[writerow] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ])
keyword[for] identifier[i] , identifier[j] keyword[in] identifier[enumerate] ( identifier[pac] . identifier[data] ):
keyword[if] identifier[i] > literal[int] :
identifier[a] = identifier[computePredictionAccuracy] ( identifier[len] ( identifier[j] ), identifier[len] ( identifier[pic] . identifier[data] [ identifier[i] ]))
identifier[am] = literal[int] * identifier[am] + literal[int] * identifier[a]
identifier[accuracies] [ identifier[i] ]= identifier[am]
identifier[i0] = identifier[max] ( literal[int] , identifier[i] - literal[int] + literal[int] )
identifier[accuracy] = identifier[numpy] . identifier[mean] ( identifier[accuracies] [ identifier[i0] : identifier[i] + literal[int] ])
identifier[smoothedAccuracies] . identifier[append] ( identifier[accuracy] )
identifier[row] =[ identifier[i] , identifier[sequenceString] [ identifier[i] ], identifier[len] ( identifier[j] ), identifier[len] ( identifier[pic] . identifier[data] [ identifier[i] ]),
identifier[len] ( identifier[upac] . identifier[data] [ identifier[i] ]), identifier[a] , identifier[am] ,
identifier[accuracy] ,
identifier[numpy] . identifier[sum] ( identifier[accuracies] [ identifier[i0] : identifier[i] + literal[int] ]),
identifier[numSegments] [ identifier[i] ], identifier[numSynapses] [ identifier[i] ]]
identifier[csvWriter] . identifier[writerow] ( identifier[row] )
keyword[return] identifier[smoothedAccuracies] | def createReport(tm, options, sequenceString, numSegments, numSynapses):
"""
Create CSV file with detailed trace of predictions, missed predictions,
accuracy, segment/synapse growth, etc.
"""
pac = tm.mmGetTracePredictedActiveColumns()
pic = tm.mmGetTracePredictedInactiveColumns()
upac = tm.mmGetTraceUnpredictedActiveColumns()
resultsFilename = os.path.join('results', options.name + '_' + str(int(100 * options.noise)) + '.csv')
with open(resultsFilename, 'wb') as resultsFile:
csvWriter = csv.writer(resultsFile)
accuracies = numpy.zeros(len(pac.data))
smoothedAccuracies = []
am = 0
csvWriter.writerow(['time', 'element', 'pac', 'pic', 'upac', 'a', 'am', 'accuracy', 'sum', 'nSegs', 'nSyns'])
for (i, j) in enumerate(pac.data):
if i > 0:
# Compute instantaneous and average accuracy.
a = computePredictionAccuracy(len(j), len(pic.data[i]))
# Smooth the curve to get averaged results for the paper.
am = 0.99 * am + 0.01 * a
accuracies[i] = am
i0 = max(0, i - 60 + 1)
accuracy = numpy.mean(accuracies[i0:i + 1])
smoothedAccuracies.append(accuracy)
row = [i, sequenceString[i], len(j), len(pic.data[i]), len(upac.data[i]), a, am, accuracy, numpy.sum(accuracies[i0:i + 1]), numSegments[i], numSynapses[i]]
csvWriter.writerow(row) # depends on [control=['if'], data=['i']] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['resultsFile']]
return smoothedAccuracies |
def select_where(self, where_col_list, where_value_list, col_name=''):
"""
selects rows from the array where col_list == val_list
"""
res = [] # list of rows to be returned
col_ids = [] # ids of the columns to check
#print('select_where : arr = ', len(self.arr), 'where_value_list = ', where_value_list)
for col_id, col in enumerate(self.header):
if col in where_col_list:
col_ids.append([col_id, col])
#print('select_where : col_ids = ', col_ids) # correctly prints [[0, 'TERM'], [2, 'ID']]
for row_num, row in enumerate(self.arr):
keep_this_row = True
#print('col_ids=', col_ids, ' row = ', row_num, row)
for ndx, where_col in enumerate(col_ids):
#print('type where_value_list[ndx] = ', type(where_value_list[ndx]))
#print('type row[where_col[0]] = ', type(row[where_col[0]]))
if row[where_col[0]] != where_value_list[ndx]:
keep_this_row = False
if keep_this_row is True:
if col_name == '':
res.append([row_num, row])
else: # extracting a single column only
l_dat = self.get_col_by_name(col_name)
if l_dat is not None:
res.append(row[l_dat])
return res | def function[select_where, parameter[self, where_col_list, where_value_list, col_name]]:
constant[
selects rows from the array where col_list == val_list
]
variable[res] assign[=] list[[]]
variable[col_ids] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20e9b13c0>, <ast.Name object at 0x7da20e9b1180>]]] in starred[call[name[enumerate], parameter[name[self].header]]] begin[:]
if compare[name[col] in name[where_col_list]] begin[:]
call[name[col_ids].append, parameter[list[[<ast.Name object at 0x7da20e9b12d0>, <ast.Name object at 0x7da20e9b1b70>]]]]
for taget[tuple[[<ast.Name object at 0x7da20e9b27d0>, <ast.Name object at 0x7da20e9b0880>]]] in starred[call[name[enumerate], parameter[name[self].arr]]] begin[:]
variable[keep_this_row] assign[=] constant[True]
for taget[tuple[[<ast.Name object at 0x7da20e9b3700>, <ast.Name object at 0x7da20e9b0c40>]]] in starred[call[name[enumerate], parameter[name[col_ids]]]] begin[:]
if compare[call[name[row]][call[name[where_col]][constant[0]]] not_equal[!=] call[name[where_value_list]][name[ndx]]] begin[:]
variable[keep_this_row] assign[=] constant[False]
if compare[name[keep_this_row] is constant[True]] begin[:]
if compare[name[col_name] equal[==] constant[]] begin[:]
call[name[res].append, parameter[list[[<ast.Name object at 0x7da20e9b0670>, <ast.Name object at 0x7da20e9b0cd0>]]]]
return[name[res]] | keyword[def] identifier[select_where] ( identifier[self] , identifier[where_col_list] , identifier[where_value_list] , identifier[col_name] = literal[string] ):
literal[string]
identifier[res] =[]
identifier[col_ids] =[]
keyword[for] identifier[col_id] , identifier[col] keyword[in] identifier[enumerate] ( identifier[self] . identifier[header] ):
keyword[if] identifier[col] keyword[in] identifier[where_col_list] :
identifier[col_ids] . identifier[append] ([ identifier[col_id] , identifier[col] ])
keyword[for] identifier[row_num] , identifier[row] keyword[in] identifier[enumerate] ( identifier[self] . identifier[arr] ):
identifier[keep_this_row] = keyword[True]
keyword[for] identifier[ndx] , identifier[where_col] keyword[in] identifier[enumerate] ( identifier[col_ids] ):
keyword[if] identifier[row] [ identifier[where_col] [ literal[int] ]]!= identifier[where_value_list] [ identifier[ndx] ]:
identifier[keep_this_row] = keyword[False]
keyword[if] identifier[keep_this_row] keyword[is] keyword[True] :
keyword[if] identifier[col_name] == literal[string] :
identifier[res] . identifier[append] ([ identifier[row_num] , identifier[row] ])
keyword[else] :
identifier[l_dat] = identifier[self] . identifier[get_col_by_name] ( identifier[col_name] )
keyword[if] identifier[l_dat] keyword[is] keyword[not] keyword[None] :
identifier[res] . identifier[append] ( identifier[row] [ identifier[l_dat] ])
keyword[return] identifier[res] | def select_where(self, where_col_list, where_value_list, col_name=''):
"""
selects rows from the array where col_list == val_list
"""
res = [] # list of rows to be returned
col_ids = [] # ids of the columns to check
#print('select_where : arr = ', len(self.arr), 'where_value_list = ', where_value_list)
for (col_id, col) in enumerate(self.header):
if col in where_col_list:
col_ids.append([col_id, col]) # depends on [control=['if'], data=['col']] # depends on [control=['for'], data=[]]
#print('select_where : col_ids = ', col_ids) # correctly prints [[0, 'TERM'], [2, 'ID']]
for (row_num, row) in enumerate(self.arr):
keep_this_row = True
#print('col_ids=', col_ids, ' row = ', row_num, row)
for (ndx, where_col) in enumerate(col_ids):
#print('type where_value_list[ndx] = ', type(where_value_list[ndx]))
#print('type row[where_col[0]] = ', type(row[where_col[0]]))
if row[where_col[0]] != where_value_list[ndx]:
keep_this_row = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if keep_this_row is True:
if col_name == '':
res.append([row_num, row]) # depends on [control=['if'], data=[]]
else: # extracting a single column only
l_dat = self.get_col_by_name(col_name)
if l_dat is not None:
res.append(row[l_dat]) # depends on [control=['if'], data=['l_dat']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return res |
def rhyming_part(phones):
"""Get the "rhyming part" of a string with CMUdict phones.
"Rhyming part" here means everything from the vowel in the stressed
syllable nearest the end of the word up to the end of the word.
.. doctest::
>>> import pronouncing
>>> phones = pronouncing.phones_for_word("purple")
>>> pronouncing.rhyming_part(phones[0])
'ER1 P AH0 L'
:param phones: a string containing space-separated CMUdict phones
:returns: a string with just the "rhyming part" of those phones
"""
phones_list = phones.split()
for i in range(len(phones_list) - 1, 0, -1):
if phones_list[i][-1] in '12':
return ' '.join(phones_list[i:])
return phones | def function[rhyming_part, parameter[phones]]:
constant[Get the "rhyming part" of a string with CMUdict phones.
"Rhyming part" here means everything from the vowel in the stressed
syllable nearest the end of the word up to the end of the word.
.. doctest::
>>> import pronouncing
>>> phones = pronouncing.phones_for_word("purple")
>>> pronouncing.rhyming_part(phones[0])
'ER1 P AH0 L'
:param phones: a string containing space-separated CMUdict phones
:returns: a string with just the "rhyming part" of those phones
]
variable[phones_list] assign[=] call[name[phones].split, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[phones_list]]] - constant[1]], constant[0], <ast.UnaryOp object at 0x7da1b191e3b0>]]] begin[:]
if compare[call[call[name[phones_list]][name[i]]][<ast.UnaryOp object at 0x7da1b191e6e0>] in constant[12]] begin[:]
return[call[constant[ ].join, parameter[call[name[phones_list]][<ast.Slice object at 0x7da1b191e440>]]]]
return[name[phones]] | keyword[def] identifier[rhyming_part] ( identifier[phones] ):
literal[string]
identifier[phones_list] = identifier[phones] . identifier[split] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[phones_list] )- literal[int] , literal[int] ,- literal[int] ):
keyword[if] identifier[phones_list] [ identifier[i] ][- literal[int] ] keyword[in] literal[string] :
keyword[return] literal[string] . identifier[join] ( identifier[phones_list] [ identifier[i] :])
keyword[return] identifier[phones] | def rhyming_part(phones):
"""Get the "rhyming part" of a string with CMUdict phones.
"Rhyming part" here means everything from the vowel in the stressed
syllable nearest the end of the word up to the end of the word.
.. doctest::
>>> import pronouncing
>>> phones = pronouncing.phones_for_word("purple")
>>> pronouncing.rhyming_part(phones[0])
'ER1 P AH0 L'
:param phones: a string containing space-separated CMUdict phones
:returns: a string with just the "rhyming part" of those phones
"""
phones_list = phones.split()
for i in range(len(phones_list) - 1, 0, -1):
if phones_list[i][-1] in '12':
return ' '.join(phones_list[i:]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return phones |
def index(self, value: Any) -> int:
"""
Returns the index in the handlers list
that matches the given value.
If no condition matches, ValueError is raised.
"""
for i, cond in ((j[0], j[1][0]) for j in enumerate(self.handlers)):
try:
match = cond(value)
except:
if self.raiseconditionerrors:
raise
match = False
if match:
return i
raise TypedloadValueError('Unable to dump %s' % value, value=value) | def function[index, parameter[self, value]]:
constant[
Returns the index in the handlers list
that matches the given value.
If no condition matches, ValueError is raised.
]
for taget[tuple[[<ast.Name object at 0x7da2041d8520>, <ast.Name object at 0x7da20c993c70>]]] in starred[<ast.GeneratorExp object at 0x7da20c992080>] begin[:]
<ast.Try object at 0x7da20c993e80>
if name[match] begin[:]
return[name[i]]
<ast.Raise object at 0x7da20c9914b0> | keyword[def] identifier[index] ( identifier[self] , identifier[value] : identifier[Any] )-> identifier[int] :
literal[string]
keyword[for] identifier[i] , identifier[cond] keyword[in] (( identifier[j] [ literal[int] ], identifier[j] [ literal[int] ][ literal[int] ]) keyword[for] identifier[j] keyword[in] identifier[enumerate] ( identifier[self] . identifier[handlers] )):
keyword[try] :
identifier[match] = identifier[cond] ( identifier[value] )
keyword[except] :
keyword[if] identifier[self] . identifier[raiseconditionerrors] :
keyword[raise]
identifier[match] = keyword[False]
keyword[if] identifier[match] :
keyword[return] identifier[i]
keyword[raise] identifier[TypedloadValueError] ( literal[string] % identifier[value] , identifier[value] = identifier[value] ) | def index(self, value: Any) -> int:
"""
Returns the index in the handlers list
that matches the given value.
If no condition matches, ValueError is raised.
"""
for (i, cond) in ((j[0], j[1][0]) for j in enumerate(self.handlers)):
try:
match = cond(value) # depends on [control=['try'], data=[]]
except:
if self.raiseconditionerrors:
raise # depends on [control=['if'], data=[]]
match = False # depends on [control=['except'], data=[]]
if match:
return i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
raise TypedloadValueError('Unable to dump %s' % value, value=value) |
def record(self, ch_node):
'''
Incremental changes
'''
rec = self.serialize_node(ch_node)
self.history.append(rec) | def function[record, parameter[self, ch_node]]:
constant[
Incremental changes
]
variable[rec] assign[=] call[name[self].serialize_node, parameter[name[ch_node]]]
call[name[self].history.append, parameter[name[rec]]] | keyword[def] identifier[record] ( identifier[self] , identifier[ch_node] ):
literal[string]
identifier[rec] = identifier[self] . identifier[serialize_node] ( identifier[ch_node] )
identifier[self] . identifier[history] . identifier[append] ( identifier[rec] ) | def record(self, ch_node):
"""
Incremental changes
"""
rec = self.serialize_node(ch_node)
self.history.append(rec) |
def content_type(self):
"""
Returns the content type of the request in all cases where it is
different than a submitted form - application/x-www-form-urlencoded
"""
type_formencoded = "application/x-www-form-urlencoded"
ctype = self.request.META.get('CONTENT_TYPE', type_formencoded)
if type_formencoded in ctype:
return None
return ctype | def function[content_type, parameter[self]]:
constant[
Returns the content type of the request in all cases where it is
different than a submitted form - application/x-www-form-urlencoded
]
variable[type_formencoded] assign[=] constant[application/x-www-form-urlencoded]
variable[ctype] assign[=] call[name[self].request.META.get, parameter[constant[CONTENT_TYPE], name[type_formencoded]]]
if compare[name[type_formencoded] in name[ctype]] begin[:]
return[constant[None]]
return[name[ctype]] | keyword[def] identifier[content_type] ( identifier[self] ):
literal[string]
identifier[type_formencoded] = literal[string]
identifier[ctype] = identifier[self] . identifier[request] . identifier[META] . identifier[get] ( literal[string] , identifier[type_formencoded] )
keyword[if] identifier[type_formencoded] keyword[in] identifier[ctype] :
keyword[return] keyword[None]
keyword[return] identifier[ctype] | def content_type(self):
"""
Returns the content type of the request in all cases where it is
different than a submitted form - application/x-www-form-urlencoded
"""
type_formencoded = 'application/x-www-form-urlencoded'
ctype = self.request.META.get('CONTENT_TYPE', type_formencoded)
if type_formencoded in ctype:
return None # depends on [control=['if'], data=[]]
return ctype |
def construct_pdb_to_rosetta_residue_map(self, rosetta_scripts_path, rosetta_database_path = None, extra_command_flags = None, cache_dir = None):
''' Uses the features database to create a mapping from Rosetta-numbered residues to PDB ATOM residues.
Next, the object's rosetta_sequences (a dict of Sequences) element is created.
Finally, a SequenceMap object is created mapping the Rosetta Sequences to the ATOM Sequences.
The extra_command_flags parameter expects a string e.g. "-ignore_zero_occupancy false".
If cache_dir is passed then the file <self.pdb_id>.
'''
## Create a mapping from Rosetta-numbered residues to PDB ATOM residues
import json
# Apply any PDB-specific hacks
specific_flag_hacks = None
if self.pdb_id and HACKS_pdb_specific_hacks.get(self.pdb_id):
specific_flag_hacks = HACKS_pdb_specific_hacks[self.pdb_id]
skeletal_chains = sorted([k for k in self.chain_types.keys() if self.chain_types[k] == 'Protein skeleton'])
if skeletal_chains:
raise PDBMissingMainchainAtomsException('The PDB to Rosetta residue map could not be created as chains %s only have CA atoms present.' % ", ".join(skeletal_chains))
# Get the residue mapping using the features database
mapping = None
cached_json_mapping_filepath = None
if cache_dir:
cached_json_mapping_filepath = os.path.join(cache_dir, '{0}.rosetta2pdb.rawmap.json'.format(self.pdb_id)) # note: the resmap.json file created by self.get_atom_sequence_to_rosetta_json_map is more involved - rawmap is simply what is returned by get_pdb_contents_to_pose_residue_map
if self.pdb_id and cache_dir and os.path.exists(cached_json_mapping_filepath):
# Read cached file
try:
mapping = json.loads(read_file(cached_json_mapping_filepath))
except: pass
if mapping == None:
pdb_file_contents = "\n".join(self.structure_lines)
success, mapping = get_pdb_contents_to_pose_residue_map(pdb_file_contents, rosetta_scripts_path, rosetta_database_path = rosetta_database_path, pdb_id = self.pdb_id, extra_flags = ((specific_flag_hacks or '') + ' ' + (extra_command_flags or '')).strip())
if not success:
raise colortext.Exception("An error occurred mapping the PDB ATOM residue IDs to the Rosetta numbering.\n%s" % "\n".join(mapping))
if self.pdb_id and cache_dir:
write_file(cached_json_mapping_filepath, json.dumps(mapping, indent = 4, sort_keys = True))
## Create Sequences for the Rosetta residues (self.rosetta_sequences)
# Initialize maps
rosetta_residues = {}
rosetta_sequences = {}
for chain_id in self.atom_chain_order:
chain_type = self.chain_types[chain_id]
rosetta_residues[chain_id] = {}
rosetta_sequences[chain_id] = Sequence(chain_type)
# Create a map rosetta_residues, Chain -> Rosetta residue ID -> Rosetta residue information
rosetta_pdb_mappings = {}
for chain_id in self.atom_chain_order:
rosetta_pdb_mappings[chain_id] = {}
for k, v in mapping.iteritems():
rosetta_residues[k[0]][v['pose_residue_id']] = v
rosetta_pdb_mappings[k[0]][v['pose_residue_id']] = k
# Create rosetta_sequences map Chain -> Sequence(Residue)
for chain_id, v in sorted(rosetta_residues.iteritems()):
chain_type = self.chain_types[chain_id]
for rosetta_id, residue_info in sorted(v.iteritems()):
short_residue_type = None
residue_type = None
if chain_type == 'Protein':
residue_type = residue_info['name3'].strip()
short_residue_type = residue_type_3to1_map.get(residue_type, 'X') # some HETATMs can be passed here e.g. MG so we can not map those cases
else:
residue_type = residue_info['res_type'].strip()
if chain_type == 'DNA':
if residue_type.find('UpperDNA') != -1 or residue_type.find('LowerDNA') != -1:
residue_type = residue_type[:3]
short_residue_type = dna_nucleotides_3to1_map.get(residue_type) # Commenting this out since Rosetta does not seem to handle these "or non_canonical_dna.get(residue_type)"
else:
assert(chain_type == 'RNA')
if residue_type.find('UpperRNA') != -1 or residue_type.find('LowerRNA') != -1 or (len(residue_type) > 3 and residue_type[3] == ':'):
residue_type = residue_type[:3]
short_residue_type = rna_nucleotides_3to1_map.get(residue_type)
if short_residue_type == None:
raise colortext.Exception('Could not determine the one-letter code of the residue: chain {0}, chain_type "{1}", residue "{2}", residue type "{3}".'.format(chain_id, chain_type, rosetta_id, residue_type))
rosetta_sequences[chain_id].add(Residue(chain_id, rosetta_id, short_residue_type, chain_type))
## Create SequenceMap objects to map the Rosetta Sequences to the ATOM Sequences
rosetta_to_atom_sequence_maps = {}
for chain_id, rosetta_pdb_mapping in rosetta_pdb_mappings.iteritems():
rosetta_to_atom_sequence_maps[chain_id] = SequenceMap.from_dict(rosetta_pdb_mapping)
self.rosetta_to_atom_sequence_maps = rosetta_to_atom_sequence_maps
self.rosetta_sequences = rosetta_sequences | def function[construct_pdb_to_rosetta_residue_map, parameter[self, rosetta_scripts_path, rosetta_database_path, extra_command_flags, cache_dir]]:
constant[ Uses the features database to create a mapping from Rosetta-numbered residues to PDB ATOM residues.
Next, the object's rosetta_sequences (a dict of Sequences) element is created.
Finally, a SequenceMap object is created mapping the Rosetta Sequences to the ATOM Sequences.
The extra_command_flags parameter expects a string e.g. "-ignore_zero_occupancy false".
If cache_dir is passed then the file <self.pdb_id>.
]
import module[json]
variable[specific_flag_hacks] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18ede44f0> begin[:]
variable[specific_flag_hacks] assign[=] call[name[HACKS_pdb_specific_hacks]][name[self].pdb_id]
variable[skeletal_chains] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da20e7483a0>]]
if name[skeletal_chains] begin[:]
<ast.Raise object at 0x7da20e74a260>
variable[mapping] assign[=] constant[None]
variable[cached_json_mapping_filepath] assign[=] constant[None]
if name[cache_dir] begin[:]
variable[cached_json_mapping_filepath] assign[=] call[name[os].path.join, parameter[name[cache_dir], call[constant[{0}.rosetta2pdb.rawmap.json].format, parameter[name[self].pdb_id]]]]
if <ast.BoolOp object at 0x7da20c6a9930> begin[:]
<ast.Try object at 0x7da20c6ab4c0>
if compare[name[mapping] equal[==] constant[None]] begin[:]
variable[pdb_file_contents] assign[=] call[constant[
].join, parameter[name[self].structure_lines]]
<ast.Tuple object at 0x7da18f58e1a0> assign[=] call[name[get_pdb_contents_to_pose_residue_map], parameter[name[pdb_file_contents], name[rosetta_scripts_path]]]
if <ast.UnaryOp object at 0x7da20c6aa950> begin[:]
<ast.Raise object at 0x7da20c6a89d0>
if <ast.BoolOp object at 0x7da18dc04e50> begin[:]
call[name[write_file], parameter[name[cached_json_mapping_filepath], call[name[json].dumps, parameter[name[mapping]]]]]
variable[rosetta_residues] assign[=] dictionary[[], []]
variable[rosetta_sequences] assign[=] dictionary[[], []]
for taget[name[chain_id]] in starred[name[self].atom_chain_order] begin[:]
variable[chain_type] assign[=] call[name[self].chain_types][name[chain_id]]
call[name[rosetta_residues]][name[chain_id]] assign[=] dictionary[[], []]
call[name[rosetta_sequences]][name[chain_id]] assign[=] call[name[Sequence], parameter[name[chain_type]]]
variable[rosetta_pdb_mappings] assign[=] dictionary[[], []]
for taget[name[chain_id]] in starred[name[self].atom_chain_order] begin[:]
call[name[rosetta_pdb_mappings]][name[chain_id]] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18dc05060>, <ast.Name object at 0x7da18dc063b0>]]] in starred[call[name[mapping].iteritems, parameter[]]] begin[:]
call[call[name[rosetta_residues]][call[name[k]][constant[0]]]][call[name[v]][constant[pose_residue_id]]] assign[=] name[v]
call[call[name[rosetta_pdb_mappings]][call[name[k]][constant[0]]]][call[name[v]][constant[pose_residue_id]]] assign[=] name[k]
for taget[tuple[[<ast.Name object at 0x7da18dc045b0>, <ast.Name object at 0x7da18dc04a90>]]] in starred[call[name[sorted], parameter[call[name[rosetta_residues].iteritems, parameter[]]]]] begin[:]
variable[chain_type] assign[=] call[name[self].chain_types][name[chain_id]]
for taget[tuple[[<ast.Name object at 0x7da20c6c5390>, <ast.Name object at 0x7da20c6c6ad0>]]] in starred[call[name[sorted], parameter[call[name[v].iteritems, parameter[]]]]] begin[:]
variable[short_residue_type] assign[=] constant[None]
variable[residue_type] assign[=] constant[None]
if compare[name[chain_type] equal[==] constant[Protein]] begin[:]
variable[residue_type] assign[=] call[call[name[residue_info]][constant[name3]].strip, parameter[]]
variable[short_residue_type] assign[=] call[name[residue_type_3to1_map].get, parameter[name[residue_type], constant[X]]]
if compare[name[short_residue_type] equal[==] constant[None]] begin[:]
<ast.Raise object at 0x7da20c796920>
call[call[name[rosetta_sequences]][name[chain_id]].add, parameter[call[name[Residue], parameter[name[chain_id], name[rosetta_id], name[short_residue_type], name[chain_type]]]]]
variable[rosetta_to_atom_sequence_maps] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da2054a6f20>, <ast.Name object at 0x7da2054a7430>]]] in starred[call[name[rosetta_pdb_mappings].iteritems, parameter[]]] begin[:]
call[name[rosetta_to_atom_sequence_maps]][name[chain_id]] assign[=] call[name[SequenceMap].from_dict, parameter[name[rosetta_pdb_mapping]]]
name[self].rosetta_to_atom_sequence_maps assign[=] name[rosetta_to_atom_sequence_maps]
name[self].rosetta_sequences assign[=] name[rosetta_sequences] | keyword[def] identifier[construct_pdb_to_rosetta_residue_map] ( identifier[self] , identifier[rosetta_scripts_path] , identifier[rosetta_database_path] = keyword[None] , identifier[extra_command_flags] = keyword[None] , identifier[cache_dir] = keyword[None] ):
literal[string]
keyword[import] identifier[json]
identifier[specific_flag_hacks] = keyword[None]
keyword[if] identifier[self] . identifier[pdb_id] keyword[and] identifier[HACKS_pdb_specific_hacks] . identifier[get] ( identifier[self] . identifier[pdb_id] ):
identifier[specific_flag_hacks] = identifier[HACKS_pdb_specific_hacks] [ identifier[self] . identifier[pdb_id] ]
identifier[skeletal_chains] = identifier[sorted] ([ identifier[k] keyword[for] identifier[k] keyword[in] identifier[self] . identifier[chain_types] . identifier[keys] () keyword[if] identifier[self] . identifier[chain_types] [ identifier[k] ]== literal[string] ])
keyword[if] identifier[skeletal_chains] :
keyword[raise] identifier[PDBMissingMainchainAtomsException] ( literal[string] % literal[string] . identifier[join] ( identifier[skeletal_chains] ))
identifier[mapping] = keyword[None]
identifier[cached_json_mapping_filepath] = keyword[None]
keyword[if] identifier[cache_dir] :
identifier[cached_json_mapping_filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[cache_dir] , literal[string] . identifier[format] ( identifier[self] . identifier[pdb_id] ))
keyword[if] identifier[self] . identifier[pdb_id] keyword[and] identifier[cache_dir] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[cached_json_mapping_filepath] ):
keyword[try] :
identifier[mapping] = identifier[json] . identifier[loads] ( identifier[read_file] ( identifier[cached_json_mapping_filepath] ))
keyword[except] : keyword[pass]
keyword[if] identifier[mapping] == keyword[None] :
identifier[pdb_file_contents] = literal[string] . identifier[join] ( identifier[self] . identifier[structure_lines] )
identifier[success] , identifier[mapping] = identifier[get_pdb_contents_to_pose_residue_map] ( identifier[pdb_file_contents] , identifier[rosetta_scripts_path] , identifier[rosetta_database_path] = identifier[rosetta_database_path] , identifier[pdb_id] = identifier[self] . identifier[pdb_id] , identifier[extra_flags] =(( identifier[specific_flag_hacks] keyword[or] literal[string] )+ literal[string] +( identifier[extra_command_flags] keyword[or] literal[string] )). identifier[strip] ())
keyword[if] keyword[not] identifier[success] :
keyword[raise] identifier[colortext] . identifier[Exception] ( literal[string] % literal[string] . identifier[join] ( identifier[mapping] ))
keyword[if] identifier[self] . identifier[pdb_id] keyword[and] identifier[cache_dir] :
identifier[write_file] ( identifier[cached_json_mapping_filepath] , identifier[json] . identifier[dumps] ( identifier[mapping] , identifier[indent] = literal[int] , identifier[sort_keys] = keyword[True] ))
identifier[rosetta_residues] ={}
identifier[rosetta_sequences] ={}
keyword[for] identifier[chain_id] keyword[in] identifier[self] . identifier[atom_chain_order] :
identifier[chain_type] = identifier[self] . identifier[chain_types] [ identifier[chain_id] ]
identifier[rosetta_residues] [ identifier[chain_id] ]={}
identifier[rosetta_sequences] [ identifier[chain_id] ]= identifier[Sequence] ( identifier[chain_type] )
identifier[rosetta_pdb_mappings] ={}
keyword[for] identifier[chain_id] keyword[in] identifier[self] . identifier[atom_chain_order] :
identifier[rosetta_pdb_mappings] [ identifier[chain_id] ]={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[mapping] . identifier[iteritems] ():
identifier[rosetta_residues] [ identifier[k] [ literal[int] ]][ identifier[v] [ literal[string] ]]= identifier[v]
identifier[rosetta_pdb_mappings] [ identifier[k] [ literal[int] ]][ identifier[v] [ literal[string] ]]= identifier[k]
keyword[for] identifier[chain_id] , identifier[v] keyword[in] identifier[sorted] ( identifier[rosetta_residues] . identifier[iteritems] ()):
identifier[chain_type] = identifier[self] . identifier[chain_types] [ identifier[chain_id] ]
keyword[for] identifier[rosetta_id] , identifier[residue_info] keyword[in] identifier[sorted] ( identifier[v] . identifier[iteritems] ()):
identifier[short_residue_type] = keyword[None]
identifier[residue_type] = keyword[None]
keyword[if] identifier[chain_type] == literal[string] :
identifier[residue_type] = identifier[residue_info] [ literal[string] ]. identifier[strip] ()
identifier[short_residue_type] = identifier[residue_type_3to1_map] . identifier[get] ( identifier[residue_type] , literal[string] )
keyword[else] :
identifier[residue_type] = identifier[residue_info] [ literal[string] ]. identifier[strip] ()
keyword[if] identifier[chain_type] == literal[string] :
keyword[if] identifier[residue_type] . identifier[find] ( literal[string] )!=- literal[int] keyword[or] identifier[residue_type] . identifier[find] ( literal[string] )!=- literal[int] :
identifier[residue_type] = identifier[residue_type] [: literal[int] ]
identifier[short_residue_type] = identifier[dna_nucleotides_3to1_map] . identifier[get] ( identifier[residue_type] )
keyword[else] :
keyword[assert] ( identifier[chain_type] == literal[string] )
keyword[if] identifier[residue_type] . identifier[find] ( literal[string] )!=- literal[int] keyword[or] identifier[residue_type] . identifier[find] ( literal[string] )!=- literal[int] keyword[or] ( identifier[len] ( identifier[residue_type] )> literal[int] keyword[and] identifier[residue_type] [ literal[int] ]== literal[string] ):
identifier[residue_type] = identifier[residue_type] [: literal[int] ]
identifier[short_residue_type] = identifier[rna_nucleotides_3to1_map] . identifier[get] ( identifier[residue_type] )
keyword[if] identifier[short_residue_type] == keyword[None] :
keyword[raise] identifier[colortext] . identifier[Exception] ( literal[string] . identifier[format] ( identifier[chain_id] , identifier[chain_type] , identifier[rosetta_id] , identifier[residue_type] ))
identifier[rosetta_sequences] [ identifier[chain_id] ]. identifier[add] ( identifier[Residue] ( identifier[chain_id] , identifier[rosetta_id] , identifier[short_residue_type] , identifier[chain_type] ))
identifier[rosetta_to_atom_sequence_maps] ={}
keyword[for] identifier[chain_id] , identifier[rosetta_pdb_mapping] keyword[in] identifier[rosetta_pdb_mappings] . identifier[iteritems] ():
identifier[rosetta_to_atom_sequence_maps] [ identifier[chain_id] ]= identifier[SequenceMap] . identifier[from_dict] ( identifier[rosetta_pdb_mapping] )
identifier[self] . identifier[rosetta_to_atom_sequence_maps] = identifier[rosetta_to_atom_sequence_maps]
identifier[self] . identifier[rosetta_sequences] = identifier[rosetta_sequences] | def construct_pdb_to_rosetta_residue_map(self, rosetta_scripts_path, rosetta_database_path=None, extra_command_flags=None, cache_dir=None):
""" Uses the features database to create a mapping from Rosetta-numbered residues to PDB ATOM residues.
Next, the object's rosetta_sequences (a dict of Sequences) element is created.
Finally, a SequenceMap object is created mapping the Rosetta Sequences to the ATOM Sequences.
The extra_command_flags parameter expects a string e.g. "-ignore_zero_occupancy false".
If cache_dir is passed then the file <self.pdb_id>.
"""
## Create a mapping from Rosetta-numbered residues to PDB ATOM residues
import json
# Apply any PDB-specific hacks
specific_flag_hacks = None
if self.pdb_id and HACKS_pdb_specific_hacks.get(self.pdb_id):
specific_flag_hacks = HACKS_pdb_specific_hacks[self.pdb_id] # depends on [control=['if'], data=[]]
skeletal_chains = sorted([k for k in self.chain_types.keys() if self.chain_types[k] == 'Protein skeleton'])
if skeletal_chains:
raise PDBMissingMainchainAtomsException('The PDB to Rosetta residue map could not be created as chains %s only have CA atoms present.' % ', '.join(skeletal_chains)) # depends on [control=['if'], data=[]]
# Get the residue mapping using the features database
mapping = None
cached_json_mapping_filepath = None
if cache_dir:
cached_json_mapping_filepath = os.path.join(cache_dir, '{0}.rosetta2pdb.rawmap.json'.format(self.pdb_id)) # note: the resmap.json file created by self.get_atom_sequence_to_rosetta_json_map is more involved - rawmap is simply what is returned by get_pdb_contents_to_pose_residue_map # depends on [control=['if'], data=[]]
if self.pdb_id and cache_dir and os.path.exists(cached_json_mapping_filepath):
# Read cached file
try:
mapping = json.loads(read_file(cached_json_mapping_filepath)) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if mapping == None:
pdb_file_contents = '\n'.join(self.structure_lines)
(success, mapping) = get_pdb_contents_to_pose_residue_map(pdb_file_contents, rosetta_scripts_path, rosetta_database_path=rosetta_database_path, pdb_id=self.pdb_id, extra_flags=((specific_flag_hacks or '') + ' ' + (extra_command_flags or '')).strip())
if not success:
raise colortext.Exception('An error occurred mapping the PDB ATOM residue IDs to the Rosetta numbering.\n%s' % '\n'.join(mapping)) # depends on [control=['if'], data=[]]
if self.pdb_id and cache_dir:
write_file(cached_json_mapping_filepath, json.dumps(mapping, indent=4, sort_keys=True)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['mapping']]
## Create Sequences for the Rosetta residues (self.rosetta_sequences)
# Initialize maps
rosetta_residues = {}
rosetta_sequences = {}
for chain_id in self.atom_chain_order:
chain_type = self.chain_types[chain_id]
rosetta_residues[chain_id] = {}
rosetta_sequences[chain_id] = Sequence(chain_type) # depends on [control=['for'], data=['chain_id']]
# Create a map rosetta_residues, Chain -> Rosetta residue ID -> Rosetta residue information
rosetta_pdb_mappings = {}
for chain_id in self.atom_chain_order:
rosetta_pdb_mappings[chain_id] = {} # depends on [control=['for'], data=['chain_id']]
for (k, v) in mapping.iteritems():
rosetta_residues[k[0]][v['pose_residue_id']] = v
rosetta_pdb_mappings[k[0]][v['pose_residue_id']] = k # depends on [control=['for'], data=[]]
# Create rosetta_sequences map Chain -> Sequence(Residue)
for (chain_id, v) in sorted(rosetta_residues.iteritems()):
chain_type = self.chain_types[chain_id]
for (rosetta_id, residue_info) in sorted(v.iteritems()):
short_residue_type = None
residue_type = None
if chain_type == 'Protein':
residue_type = residue_info['name3'].strip()
short_residue_type = residue_type_3to1_map.get(residue_type, 'X') # some HETATMs can be passed here e.g. MG so we can not map those cases # depends on [control=['if'], data=[]]
else:
residue_type = residue_info['res_type'].strip()
if chain_type == 'DNA':
if residue_type.find('UpperDNA') != -1 or residue_type.find('LowerDNA') != -1:
residue_type = residue_type[:3] # depends on [control=['if'], data=[]]
short_residue_type = dna_nucleotides_3to1_map.get(residue_type) # Commenting this out since Rosetta does not seem to handle these "or non_canonical_dna.get(residue_type)" # depends on [control=['if'], data=[]]
else:
assert chain_type == 'RNA'
if residue_type.find('UpperRNA') != -1 or residue_type.find('LowerRNA') != -1 or (len(residue_type) > 3 and residue_type[3] == ':'):
residue_type = residue_type[:3] # depends on [control=['if'], data=[]]
short_residue_type = rna_nucleotides_3to1_map.get(residue_type)
if short_residue_type == None:
raise colortext.Exception('Could not determine the one-letter code of the residue: chain {0}, chain_type "{1}", residue "{2}", residue type "{3}".'.format(chain_id, chain_type, rosetta_id, residue_type)) # depends on [control=['if'], data=[]]
rosetta_sequences[chain_id].add(Residue(chain_id, rosetta_id, short_residue_type, chain_type)) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
## Create SequenceMap objects to map the Rosetta Sequences to the ATOM Sequences
rosetta_to_atom_sequence_maps = {}
for (chain_id, rosetta_pdb_mapping) in rosetta_pdb_mappings.iteritems():
rosetta_to_atom_sequence_maps[chain_id] = SequenceMap.from_dict(rosetta_pdb_mapping) # depends on [control=['for'], data=[]]
self.rosetta_to_atom_sequence_maps = rosetta_to_atom_sequence_maps
self.rosetta_sequences = rosetta_sequences |
def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
    """Given a CSS Selector, returns a list of
    :class:`Element <Element>` objects or a single one.
    :param selector: CSS Selector to use.
    :param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
    :param containing: If specified, only return elements that contain the provided text.
    :param first: Whether or not to return just the first result.
    :param _encoding: The encoding format.
    Example CSS Selectors:
    - ``a``
    - ``a.someClass``
    - ``a#someID``
    - ``a[target=_blank]``
    See W3School's `CSS Selectors Reference
    <https://www.w3schools.com/cssref/css_selectors.asp>`_
    for more details.
    If ``first`` is ``True``, only returns the first
    :class:`Element <Element>` found.
    """
    # Convert a single containing into a list.
    if isinstance(containing, str):
        containing = [containing]
    encoding = _encoding or self.encoding
    elements = [
        Element(element=found, url=self.url, default_encoding=encoding)
        for found in self.pq(selector)
    ]
    if containing:
        # Case-insensitive substring filter; lowercase the needles once
        # instead of on every element, and use a generator inside any()
        # so no throwaway list is built per element.
        needles = [c.lower() for c in containing]
        elements = [
            element for element in elements
            if any(needle in element.full_text.lower() for needle in needles)
        ]
        # Preserved from the original implementation: filtered results
        # are returned in reverse document order.
        elements.reverse()
    # Sanitize the found HTML in place; the list contents are unchanged.
    if clean:
        for element in elements:
            element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
    return _get_first_or_list(elements, first)
constant[Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
]
if call[name[isinstance], parameter[name[containing], name[str]]] begin[:]
variable[containing] assign[=] list[[<ast.Name object at 0x7da1b1fca080>]]
variable[encoding] assign[=] <ast.BoolOp object at 0x7da1b1fc9c00>
variable[elements] assign[=] <ast.ListComp object at 0x7da1b1fc8fa0>
if name[containing] begin[:]
variable[elements_copy] assign[=] call[name[elements].copy, parameter[]]
variable[elements] assign[=] list[[]]
for taget[name[element]] in starred[name[elements_copy]] begin[:]
if call[name[any], parameter[<ast.ListComp object at 0x7da1b1fcad70>]] begin[:]
call[name[elements].append, parameter[name[element]]]
call[name[elements].reverse, parameter[]]
if name[clean] begin[:]
variable[elements_copy] assign[=] call[name[elements].copy, parameter[]]
variable[elements] assign[=] list[[]]
for taget[name[element]] in starred[name[elements_copy]] begin[:]
name[element].raw_html assign[=] call[name[lxml_html_tostring], parameter[call[name[cleaner].clean_html, parameter[name[element].lxml]]]]
call[name[elements].append, parameter[name[element]]]
return[call[name[_get_first_or_list], parameter[name[elements], name[first]]]] | keyword[def] identifier[find] ( identifier[self] , identifier[selector] : identifier[str] = literal[string] ,*, identifier[containing] : identifier[_Containing] = keyword[None] , identifier[clean] : identifier[bool] = keyword[False] , identifier[first] : identifier[bool] = keyword[False] , identifier[_encoding] : identifier[str] = keyword[None] )-> identifier[_Find] :
literal[string]
keyword[if] identifier[isinstance] ( identifier[containing] , identifier[str] ):
identifier[containing] =[ identifier[containing] ]
identifier[encoding] = identifier[_encoding] keyword[or] identifier[self] . identifier[encoding]
identifier[elements] =[
identifier[Element] ( identifier[element] = identifier[found] , identifier[url] = identifier[self] . identifier[url] , identifier[default_encoding] = identifier[encoding] )
keyword[for] identifier[found] keyword[in] identifier[self] . identifier[pq] ( identifier[selector] )
]
keyword[if] identifier[containing] :
identifier[elements_copy] = identifier[elements] . identifier[copy] ()
identifier[elements] =[]
keyword[for] identifier[element] keyword[in] identifier[elements_copy] :
keyword[if] identifier[any] ([ identifier[c] . identifier[lower] () keyword[in] identifier[element] . identifier[full_text] . identifier[lower] () keyword[for] identifier[c] keyword[in] identifier[containing] ]):
identifier[elements] . identifier[append] ( identifier[element] )
identifier[elements] . identifier[reverse] ()
keyword[if] identifier[clean] :
identifier[elements_copy] = identifier[elements] . identifier[copy] ()
identifier[elements] =[]
keyword[for] identifier[element] keyword[in] identifier[elements_copy] :
identifier[element] . identifier[raw_html] = identifier[lxml_html_tostring] ( identifier[cleaner] . identifier[clean_html] ( identifier[element] . identifier[lxml] ))
identifier[elements] . identifier[append] ( identifier[element] )
keyword[return] identifier[_get_first_or_list] ( identifier[elements] , identifier[first] ) | def find(self, selector: str='*', *, containing: _Containing=None, clean: bool=False, first: bool=False, _encoding: str=None) -> _Find:
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing] # depends on [control=['if'], data=[]]
encoding = _encoding or self.encoding
elements = [Element(element=found, url=self.url, default_encoding=encoding) for found in self.pq(selector)]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['element']]
elements.reverse() # depends on [control=['if'], data=[]]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element) # depends on [control=['for'], data=['element']] # depends on [control=['if'], data=[]]
return _get_first_or_list(elements, first) |
def copyData(self, source):
    """
    Copy every attribute named in ``copyAttributes`` from *source*
    onto this object, recursing into nested :class:`BaseObject` values.
    Subclasses may override this method.
    If so, they should call the super.
    """
    for name in self.copyAttributes:
        own = getattr(self, name)
        incoming = getattr(source, name)
        if isinstance(own, BaseObject):
            # Nested object: delegate so it can copy its own attributes.
            own.copyData(incoming)
        else:
            # Plain value: overwrite our attribute with the source's.
            setattr(self, name, incoming)
setattr(self, attr, sourceValue) | def function[copyData, parameter[self, source]]:
constant[
Subclasses may override this method.
If so, they should call the super.
]
for taget[name[attr]] in starred[name[self].copyAttributes] begin[:]
variable[selfValue] assign[=] call[name[getattr], parameter[name[self], name[attr]]]
variable[sourceValue] assign[=] call[name[getattr], parameter[name[source], name[attr]]]
if call[name[isinstance], parameter[name[selfValue], name[BaseObject]]] begin[:]
call[name[selfValue].copyData, parameter[name[sourceValue]]] | keyword[def] identifier[copyData] ( identifier[self] , identifier[source] ):
literal[string]
keyword[for] identifier[attr] keyword[in] identifier[self] . identifier[copyAttributes] :
identifier[selfValue] = identifier[getattr] ( identifier[self] , identifier[attr] )
identifier[sourceValue] = identifier[getattr] ( identifier[source] , identifier[attr] )
keyword[if] identifier[isinstance] ( identifier[selfValue] , identifier[BaseObject] ):
identifier[selfValue] . identifier[copyData] ( identifier[sourceValue] )
keyword[else] :
identifier[setattr] ( identifier[self] , identifier[attr] , identifier[sourceValue] ) | def copyData(self, source):
"""
Subclasses may override this method.
If so, they should call the super.
"""
for attr in self.copyAttributes:
selfValue = getattr(self, attr)
sourceValue = getattr(source, attr)
if isinstance(selfValue, BaseObject):
selfValue.copyData(sourceValue) # depends on [control=['if'], data=[]]
else:
setattr(self, attr, sourceValue) # depends on [control=['for'], data=['attr']] |
def error(msg: str, resource: Optional['Resource'] = None, stream_id: Optional[int] = None):
    """
    Logs a message to the Pulumi CLI's error channel, associating it with a resource
    and stream_id if provided.
    :param str msg: The message to send to the Pulumi CLI.
    :param Optional[Resource] resource: If provided, associate this message with the given resource in the Pulumi CLI.
    :param Optional[int] stream_id: If provided, associate this message with a stream of other messages.
    """
    engine = get_engine()
    # Without a connected engine there is nowhere to route the log
    # entry, so fall back to plain stderr output.
    if engine is None:
        print("error: " + msg, file=sys.stderr)
        return
    _log(engine, engine_pb2.ERROR, msg, resource, stream_id)
constant[
Logs a message to the Pulumi CLI's error channel, associating it with a resource
and stream_id if provided.
:param str msg: The message to send to the Pulumi CLI.
:param Optional[Resource] resource: If provided, associate this message with the given resource in the Pulumi CLI.
:param Optional[int] stream_id: If provided, associate this message with a stream of other messages.
]
variable[engine] assign[=] call[name[get_engine], parameter[]]
if compare[name[engine] is_not constant[None]] begin[:]
call[name[_log], parameter[name[engine], name[engine_pb2].ERROR, name[msg], name[resource], name[stream_id]]] | keyword[def] identifier[error] ( identifier[msg] : identifier[str] , identifier[resource] : identifier[Optional] [ literal[string] ]= keyword[None] , identifier[stream_id] : identifier[Optional] [ identifier[int] ]= keyword[None] ):
literal[string]
identifier[engine] = identifier[get_engine] ()
keyword[if] identifier[engine] keyword[is] keyword[not] keyword[None] :
identifier[_log] ( identifier[engine] , identifier[engine_pb2] . identifier[ERROR] , identifier[msg] , identifier[resource] , identifier[stream_id] )
keyword[else] :
identifier[print] ( literal[string] + identifier[msg] , identifier[file] = identifier[sys] . identifier[stderr] ) | def error(msg: str, resource: Optional['Resource']=None, stream_id: Optional[int]=None):
"""
Logs a message to the Pulumi CLI's error channel, associating it with a resource
and stream_id if provided.
:param str msg: The message to send to the Pulumi CLI.
:param Optional[Resource] resource: If provided, associate this message with the given resource in the Pulumi CLI.
:param Optional[int] stream_id: If provided, associate this message with a stream of other messages.
"""
engine = get_engine()
if engine is not None:
_log(engine, engine_pb2.ERROR, msg, resource, stream_id) # depends on [control=['if'], data=['engine']]
else:
print('error: ' + msg, file=sys.stderr) |
def is_netcdf(url):
    '''
    Returns True if the URL points to a valid local netCDF file
    :param str url: Location of file on the file system
    '''
    # Remote resources can never be local netCDF files.
    if url.startswith('http'):
        return False
    # Trust the conventional extension without opening the file.
    if url.endswith('nc'):
        return True
    # Otherwise sniff the first four magic bytes of the file.
    with open(url, 'rb') as handle:
        magic_number = handle.read(4)
    if len(magic_number) < 4:
        return False
    return is_classic_netcdf(magic_number) or is_hdf5(magic_number)
return False | def function[is_netcdf, parameter[url]]:
constant[
Returns True if the URL points to a valid local netCDF file
:param str url: Location of file on the file system
]
if call[name[url].startswith, parameter[constant[http]]] begin[:]
return[constant[False]]
if call[name[url].endswith, parameter[constant[nc]]] begin[:]
return[constant[True]]
with call[name[open], parameter[name[url], constant[rb]]] begin[:]
variable[magic_number] assign[=] call[name[f].read, parameter[constant[4]]]
if compare[call[name[len], parameter[name[magic_number]]] less[<] constant[4]] begin[:]
return[constant[False]]
if call[name[is_classic_netcdf], parameter[name[magic_number]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[is_netcdf] ( identifier[url] ):
literal[string]
keyword[if] identifier[url] . identifier[startswith] ( literal[string] ):
keyword[return] keyword[False]
keyword[if] identifier[url] . identifier[endswith] ( literal[string] ):
keyword[return] keyword[True]
keyword[with] identifier[open] ( identifier[url] , literal[string] ) keyword[as] identifier[f] :
identifier[magic_number] = identifier[f] . identifier[read] ( literal[int] )
keyword[if] identifier[len] ( identifier[magic_number] )< literal[int] :
keyword[return] keyword[False]
keyword[if] identifier[is_classic_netcdf] ( identifier[magic_number] ):
keyword[return] keyword[True]
keyword[elif] identifier[is_hdf5] ( identifier[magic_number] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def is_netcdf(url):
"""
Returns True if the URL points to a valid local netCDF file
:param str url: Location of file on the file system
"""
# Try an obvious exclusion of remote resources
if url.startswith('http'):
return False # depends on [control=['if'], data=[]]
# If it's a known extension, give it a shot
if url.endswith('nc'):
return True # depends on [control=['if'], data=[]]
# Brute force
with open(url, 'rb') as f:
magic_number = f.read(4)
if len(magic_number) < 4:
return False # depends on [control=['if'], data=[]]
if is_classic_netcdf(magic_number):
return True # depends on [control=['if'], data=[]]
elif is_hdf5(magic_number):
return True # depends on [control=['if'], data=[]]
return False # depends on [control=['with'], data=['f']] |
def directory_generator(dirname, trim=0):
    """
    yields a tuple of (relative filename, chunking function). The
    chunking function can be called to open and iterate over the
    contents of the filename.
    """
    # Callback for the (Python 2 style) walk(): record every
    # non-directory entry found under the tree.
    def visit(acc, path, names):
        for name in names:
            full = join(path, name)
            if not isdir(full):
                acc.append(full)
    found = list()
    walk(dirname, visit, found)
    for full in found:
        # Strip the first `trim` characters to make the name relative.
        yield full[trim:], file_chunk(full)
constant[
yields a tuple of (relative filename, chunking function). The
chunking function can be called to open and iterate over the
contents of the filename.
]
def function[gather, parameter[collect, dirname, fnames]]:
for taget[name[fname]] in starred[name[fnames]] begin[:]
variable[df] assign[=] call[name[join], parameter[name[dirname], name[fname]]]
if <ast.UnaryOp object at 0x7da1b0b1beb0> begin[:]
call[name[collect].append, parameter[name[df]]]
variable[collect] assign[=] call[name[list], parameter[]]
call[name[walk], parameter[name[dirname], name[gather], name[collect]]]
for taget[name[fname]] in starred[name[collect]] begin[:]
<ast.Yield object at 0x7da1b0b18670> | keyword[def] identifier[directory_generator] ( identifier[dirname] , identifier[trim] = literal[int] ):
literal[string]
keyword[def] identifier[gather] ( identifier[collect] , identifier[dirname] , identifier[fnames] ):
keyword[for] identifier[fname] keyword[in] identifier[fnames] :
identifier[df] = identifier[join] ( identifier[dirname] , identifier[fname] )
keyword[if] keyword[not] identifier[isdir] ( identifier[df] ):
identifier[collect] . identifier[append] ( identifier[df] )
identifier[collect] = identifier[list] ()
identifier[walk] ( identifier[dirname] , identifier[gather] , identifier[collect] )
keyword[for] identifier[fname] keyword[in] identifier[collect] :
keyword[yield] identifier[fname] [ identifier[trim] :], identifier[file_chunk] ( identifier[fname] ) | def directory_generator(dirname, trim=0):
"""
yields a tuple of (relative filename, chunking function). The
chunking function can be called to open and iterate over the
contents of the filename.
"""
def gather(collect, dirname, fnames):
for fname in fnames:
df = join(dirname, fname)
if not isdir(df):
collect.append(df) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fname']]
collect = list()
walk(dirname, gather, collect)
for fname in collect:
yield (fname[trim:], file_chunk(fname)) # depends on [control=['for'], data=['fname']] |
def tocimxml(self, ignore_path=False):
    """
    Return the CIM-XML representation of this CIM instance,
    as an object of an appropriate subclass of :term:`Element`.
    If the instance has no instance path specified or if `ignore_path` is
    `True`, the returned CIM-XML representation is an `INSTANCE` element
    consistent with :term:`DSP0201`. This is the required element for
    representing embedded instances.
    Otherwise, if the instance path of the instance has no namespace
    specified, the returned CIM-XML representation is an
    `VALUE.NAMEDINSTANCE` element consistent with :term:`DSP0201`.
    Otherwise, if the instance path of the instance has no host specified,
    the returned CIM-XML representation is a
    `VALUE.OBJECTWITHLOCALPATH` element consistent with :term:`DSP0201`.
    Otherwise, the returned CIM-XML representation is a
    `VALUE.INSTANCEWITHPATH` element consistent with :term:`DSP0201`.
    The order of properties and qualifiers in the returned CIM-XML
    representation is preserved from the :class:`~pywbem.CIMInstance`
    object.
    Parameters:
      ignore_path (:class:`py:bool`): Ignore the path of the instance, even
        if a path is specified.
    Returns:
      The CIM-XML representation, as an object of an appropriate subclass
      of :term:`Element`.
    """
    # The items in the self.properties dictionary are required to be
    # CIMProperty objects and that is ensured when initializing a
    # CIMInstance object and when setting the entire self.properties
    # attribute. However, even though the items in the dictionary are
    # required to be CIMProperty objects, the user technically can set
    # them to anything, so we validate here.
    # NOTE: This uses an explicit isinstance check instead of the former
    # assert-inside-try/except pattern: `assert` statements are stripped
    # when running under "python -O", which silently disabled this
    # validation. The raised exception type and message are unchanged.
    for key, value in self.properties.items():
        if not isinstance(value, CIMProperty):
            raise TypeError(
                _format("Property {0!A} has invalid type: {1} (must be "
                        "CIMProperty)", key, builtin_type(value)))

    instance_xml = cim_xml.INSTANCE(
        self.classname,
        properties=[p.tocimxml() for p in self.properties.values()],
        qualifiers=[q.tocimxml() for q in self.qualifiers.values()])

    # Bare INSTANCE element: no path available, or caller asked to
    # ignore it (required form for embedded instances).
    if self.path is None or ignore_path:
        return instance_xml

    # Path without namespace -> VALUE.NAMEDINSTANCE.
    if self.path.namespace is None:
        return cim_xml.VALUE_NAMEDINSTANCE(
            self.path.tocimxml(),
            instance_xml)

    # Path with namespace but no host -> VALUE.OBJECTWITHLOCALPATH.
    if self.path.host is None:
        return cim_xml.VALUE_OBJECTWITHLOCALPATH(
            self.path.tocimxml(),
            instance_xml)

    # Fully specified path -> VALUE.INSTANCEWITHPATH.
    return cim_xml.VALUE_INSTANCEWITHPATH(
        self.path.tocimxml(),
        instance_xml)
constant[
Return the CIM-XML representation of this CIM instance,
as an object of an appropriate subclass of :term:`Element`.
If the instance has no instance path specified or if `ignore_path` is
`True`, the returned CIM-XML representation is an `INSTANCE` element
consistent with :term:`DSP0201`. This is the required element for
representing embedded instances.
Otherwise, if the instance path of the instance has no namespace
specified, the returned CIM-XML representation is an
`VALUE.NAMEDINSTANCE` element consistent with :term:`DSP0201`.
Otherwise, if the instance path of the instance has no host specified,
the returned CIM-XML representation is a
`VALUE.OBJECTWITHLOCALPATH` element consistent with :term:`DSP0201`.
Otherwise, the returned CIM-XML representation is a
`VALUE.INSTANCEWITHPATH` element consistent with :term:`DSP0201`.
The order of properties and qualifiers in the returned CIM-XML
representation is preserved from the :class:`~pywbem.CIMInstance`
object.
Parameters:
ignore_path (:class:`py:bool`): Ignore the path of the instance, even
if a path is specified.
Returns:
The CIM-XML representation, as an object of an appropriate subclass
of :term:`Element`.
]
for taget[tuple[[<ast.Name object at 0x7da207f99900>, <ast.Name object at 0x7da207f9bf70>]]] in starred[call[name[self].properties.items, parameter[]]] begin[:]
<ast.Try object at 0x7da207f992d0>
variable[instance_xml] assign[=] call[name[cim_xml].INSTANCE, parameter[name[self].classname]]
if <ast.BoolOp object at 0x7da204344a30> begin[:]
return[name[instance_xml]]
if compare[name[self].path.namespace is constant[None]] begin[:]
return[call[name[cim_xml].VALUE_NAMEDINSTANCE, parameter[call[name[self].path.tocimxml, parameter[]], name[instance_xml]]]]
if compare[name[self].path.host is constant[None]] begin[:]
return[call[name[cim_xml].VALUE_OBJECTWITHLOCALPATH, parameter[call[name[self].path.tocimxml, parameter[]], name[instance_xml]]]]
return[call[name[cim_xml].VALUE_INSTANCEWITHPATH, parameter[call[name[self].path.tocimxml, parameter[]], name[instance_xml]]]] | keyword[def] identifier[tocimxml] ( identifier[self] , identifier[ignore_path] = keyword[False] ):
literal[string]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[properties] . identifier[items] ():
keyword[try] :
keyword[assert] identifier[isinstance] ( identifier[value] , identifier[CIMProperty] )
keyword[except] identifier[AssertionError] :
keyword[raise] identifier[TypeError] (
identifier[_format] ( literal[string]
literal[string] , identifier[key] , identifier[builtin_type] ( identifier[value] )))
identifier[instance_xml] = identifier[cim_xml] . identifier[INSTANCE] (
identifier[self] . identifier[classname] ,
identifier[properties] =[ identifier[p] . identifier[tocimxml] () keyword[for] identifier[p] keyword[in] identifier[self] . identifier[properties] . identifier[values] ()],
identifier[qualifiers] =[ identifier[q] . identifier[tocimxml] () keyword[for] identifier[q] keyword[in] identifier[self] . identifier[qualifiers] . identifier[values] ()])
keyword[if] identifier[self] . identifier[path] keyword[is] keyword[None] keyword[or] identifier[ignore_path] :
keyword[return] identifier[instance_xml]
keyword[if] identifier[self] . identifier[path] . identifier[namespace] keyword[is] keyword[None] :
keyword[return] identifier[cim_xml] . identifier[VALUE_NAMEDINSTANCE] (
identifier[self] . identifier[path] . identifier[tocimxml] (),
identifier[instance_xml] )
keyword[if] identifier[self] . identifier[path] . identifier[host] keyword[is] keyword[None] :
keyword[return] identifier[cim_xml] . identifier[VALUE_OBJECTWITHLOCALPATH] (
identifier[self] . identifier[path] . identifier[tocimxml] (),
identifier[instance_xml] )
keyword[return] identifier[cim_xml] . identifier[VALUE_INSTANCEWITHPATH] (
identifier[self] . identifier[path] . identifier[tocimxml] (),
identifier[instance_xml] ) | def tocimxml(self, ignore_path=False):
"""
Return the CIM-XML representation of this CIM instance,
as an object of an appropriate subclass of :term:`Element`.
If the instance has no instance path specified or if `ignore_path` is
`True`, the returned CIM-XML representation is an `INSTANCE` element
consistent with :term:`DSP0201`. This is the required element for
representing embedded instances.
Otherwise, if the instance path of the instance has no namespace
specified, the returned CIM-XML representation is an
`VALUE.NAMEDINSTANCE` element consistent with :term:`DSP0201`.
Otherwise, if the instance path of the instance has no host specified,
the returned CIM-XML representation is a
`VALUE.OBJECTWITHLOCALPATH` element consistent with :term:`DSP0201`.
Otherwise, the returned CIM-XML representation is a
`VALUE.INSTANCEWITHPATH` element consistent with :term:`DSP0201`.
The order of properties and qualifiers in the returned CIM-XML
representation is preserved from the :class:`~pywbem.CIMInstance`
object.
Parameters:
ignore_path (:class:`py:bool`): Ignore the path of the instance, even
if a path is specified.
Returns:
The CIM-XML representation, as an object of an appropriate subclass
of :term:`Element`.
"""
# The items in the self.properties dictionary are required to be
# CIMProperty objects and that is ensured when initializing a
# CIMInstance object and when setting the entire self.properties
# attribute. However, even though the items in the dictionary are
# required to be CIMProperty objects, the user technically can set
# them to anything.
# Before pywbem 0.12, the dictionary items were converted to
# CIMProperty objects. This was only done for properties of
# CIMinstance, but not for any other CIM object attribute.
# In pywbem 0.12, this conversion was removed because it worked only
# for bool and string types anyway. Because that conversion had been
# implemented, we still check that the items are CIMProperty objects.
for (key, value) in self.properties.items():
try:
assert isinstance(value, CIMProperty) # depends on [control=['try'], data=[]]
except AssertionError:
raise TypeError(_format('Property {0!A} has invalid type: {1} (must be CIMProperty)', key, builtin_type(value))) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
instance_xml = cim_xml.INSTANCE(self.classname, properties=[p.tocimxml() for p in self.properties.values()], qualifiers=[q.tocimxml() for q in self.qualifiers.values()])
if self.path is None or ignore_path:
return instance_xml # depends on [control=['if'], data=[]]
if self.path.namespace is None:
return cim_xml.VALUE_NAMEDINSTANCE(self.path.tocimxml(), instance_xml) # depends on [control=['if'], data=[]]
if self.path.host is None:
return cim_xml.VALUE_OBJECTWITHLOCALPATH(self.path.tocimxml(), instance_xml) # depends on [control=['if'], data=[]]
return cim_xml.VALUE_INSTANCEWITHPATH(self.path.tocimxml(), instance_xml) |
def increment(self, amount=1, output=True):
    """Advance the progress counter by *amount* (default 1) and, when *output*
    is true, rewrite the status line on stdout with an updated time estimate.
    """
    self._current += amount
    if not output:
        return
    # NOTE: the remaining-time estimate extrapolates the average pace so far,
    # so it is most accurate when every step takes roughly the same time.
    now = datetime.now()
    elapsed = now - self._started_at
    eta = self._started_at + elapsed * (1 / (self._current / self._total))
    remaining = eta - now
    remaining_str = (
        self._format_time_remaining(remaining)
        if self._format_time_remaining
        else str(remaining)
    )
    # Blank out the previous message; tabs are re-emitted as tabs so the
    # cleared region lines up with what was printed before.
    tabs = self._message.count("\t")
    sys.stdout.write("\r" + "\t" * tabs + " " * (len(self._message) - tabs))
    # The format may be either a str template or a callable taking
    # (current, total, remaining_str).
    formatter = self._format.format if isinstance(self._format, str) else self._format
    self._message = formatter(self._current, self._total, remaining_str)
    sys.stdout.write("\r" + self._message)
    sys.stdout.flush()
constant[Increment progress by a given amount (defaults to 1) and (optionally) output the progress
status.
]
<ast.AugAssign object at 0x7da1b13650f0>
if name[output] begin[:]
variable[now] assign[=] call[name[datetime].now, parameter[]]
variable[elapsed_time] assign[=] binary_operation[name[now] - name[self]._started_at]
variable[expected_finish] assign[=] binary_operation[name[self]._started_at + binary_operation[name[elapsed_time] * binary_operation[constant[1] / binary_operation[name[self]._current / name[self]._total]]]]
variable[remaining_time] assign[=] binary_operation[name[expected_finish] - name[now]]
if name[self]._format_time_remaining begin[:]
variable[remaining_time_str] assign[=] call[name[self]._format_time_remaining, parameter[name[remaining_time]]]
variable[tab_count] assign[=] call[name[self]._message.count, parameter[constant[ ]]]
call[name[sys].stdout.write, parameter[binary_operation[binary_operation[constant[
] + binary_operation[constant[ ] * name[tab_count]]] + binary_operation[constant[ ] * binary_operation[call[name[len], parameter[name[self]._message]] - name[tab_count]]]]]]
if call[name[isinstance], parameter[name[self]._format, name[str]]] begin[:]
name[self]._message assign[=] call[name[self]._format.format, parameter[name[self]._current, name[self]._total, name[remaining_time_str]]]
call[name[sys].stdout.write, parameter[binary_operation[constant[
] + name[self]._message]]]
call[name[sys].stdout.flush, parameter[]] | keyword[def] identifier[increment] ( identifier[self] , identifier[amount] = literal[int] , identifier[output] = keyword[True] ):
literal[string]
identifier[self] . identifier[_current] += identifier[amount]
keyword[if] identifier[output] :
identifier[now] = identifier[datetime] . identifier[now] ()
identifier[elapsed_time] = identifier[now] - identifier[self] . identifier[_started_at]
identifier[expected_finish] = identifier[self] . identifier[_started_at] + identifier[elapsed_time] *( literal[int] /( identifier[self] . identifier[_current] / identifier[self] . identifier[_total] ))
identifier[remaining_time] = identifier[expected_finish] - identifier[now]
keyword[if] identifier[self] . identifier[_format_time_remaining] :
identifier[remaining_time_str] = identifier[self] . identifier[_format_time_remaining] ( identifier[remaining_time] )
keyword[else] :
identifier[remaining_time_str] = identifier[str] ( identifier[remaining_time] )
identifier[tab_count] = identifier[self] . identifier[_message] . identifier[count] ( literal[string] )
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] + literal[string] * identifier[tab_count] + literal[string] *( identifier[len] ( identifier[self] . identifier[_message] )- identifier[tab_count] ))
keyword[if] identifier[isinstance] ( identifier[self] . identifier[_format] , identifier[str] ):
identifier[self] . identifier[_message] = identifier[self] . identifier[_format] . identifier[format] ( identifier[self] . identifier[_current] , identifier[self] . identifier[_total] , identifier[remaining_time_str] )
keyword[else] :
identifier[self] . identifier[_message] = identifier[self] . identifier[_format] ( identifier[self] . identifier[_current] , identifier[self] . identifier[_total] , identifier[remaining_time_str] )
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] + identifier[self] . identifier[_message] )
identifier[sys] . identifier[stdout] . identifier[flush] () | def increment(self, amount=1, output=True):
"""Increment progress by a given amount (defaults to 1) and (optionally) output the progress
status.
"""
self._current += amount
if output:
# NOTE: This algorithm averages out the time it has taken to get to the current / total to
# determine how much time is left. So this is best used with jobs that have steps whose
# run time is roughly the same.
now = datetime.now()
elapsed_time = now - self._started_at
expected_finish = self._started_at + elapsed_time * (1 / (self._current / self._total))
remaining_time = expected_finish - now
if self._format_time_remaining:
remaining_time_str = self._format_time_remaining(remaining_time) # depends on [control=['if'], data=[]]
else:
remaining_time_str = str(remaining_time)
tab_count = self._message.count('\t')
sys.stdout.write('\r' + '\t' * tab_count + ' ' * (len(self._message) - tab_count))
if isinstance(self._format, str):
self._message = self._format.format(self._current, self._total, remaining_time_str) # depends on [control=['if'], data=[]]
else:
self._message = self._format(self._current, self._total, remaining_time_str)
sys.stdout.write('\r' + self._message)
sys.stdout.flush() # depends on [control=['if'], data=[]] |
def _fill_role_cache_batch(self, principals, overwrite=False):
    """Fill role cache for `principals` (Users and/or Groups), in order to
    avoid too many queries when checking role access with 'has_role'."""
    if not self.app_state.use_cache:
        return

    query = db.session.query(RoleAssignment)
    users = {p for p in principals if isinstance(p, User)}
    groups = {p for p in principals if isinstance(p, Group)}
    # Users inherit roles from their groups, so those groups are cached too.
    for member in users:
        groups.update(member.groups)

    if not overwrite:
        users = {u for u in users if not self._has_role_cache(u)}
        groups = {g for g in groups if not self._has_role_cache(g)}

    if not users and not groups:
        return

    # Seed every principal with an empty cache first: users/groups that turn
    # out to have no role assignment still end up cached, which avoids a
    # per-principal DB query later when calling self._fill_role_cache(p).
    for principal in chain(users, groups):
        self._set_role_cache(principal, {})

    conditions = []
    if users:
        conditions.append(RoleAssignment.user_id.in_(u.id for u in users))
    if groups:
        conditions.append(RoleAssignment.group_id.in_(g.id for g in groups))
    query = query.filter(sql.or_(*conditions))

    ra_users = {}
    ra_groups = {}
    for ra in query.all():
        if ra.user:
            bucket = ra_users.setdefault(ra.user, {})
        else:
            bucket = ra_groups.setdefault(ra.group, {})
        # None key means a global (non-object-scoped) role assignment.
        if ra.object is not None:
            object_key = f"{ra.object.entity_type}:{ra.object_id:d}"
        else:
            object_key = None
        bucket.setdefault(object_key, set()).add(ra.role)

    for group, roles_by_object in ra_groups.items():
        self._set_role_cache(group, roles_by_object)

    for user, roles_by_object in ra_users.items():
        # A user's effective roles include those inherited from its groups.
        for grp in user.groups:
            for object_key, roles in self._fill_role_cache(grp).items():
                roles_by_object.setdefault(object_key, set()).update(roles)
        self._set_role_cache(user, roles_by_object)
constant[Fill role cache for `principals` (Users and/or Groups), in order to
avoid too many queries when checking role access with 'has_role'.]
if <ast.UnaryOp object at 0x7da1b196f610> begin[:]
return[None]
variable[query] assign[=] call[name[db].session.query, parameter[name[RoleAssignment]]]
variable[users] assign[=] <ast.SetComp object at 0x7da1b196f130>
variable[groups] assign[=] <ast.SetComp object at 0x7da1b196d6f0>
<ast.AugAssign object at 0x7da1b196f7f0>
if <ast.UnaryOp object at 0x7da1b196fa00> begin[:]
variable[users] assign[=] <ast.SetComp object at 0x7da1b196ed40>
variable[groups] assign[=] <ast.SetComp object at 0x7da1b196f370>
if <ast.UnaryOp object at 0x7da1b196e4a0> begin[:]
return[None]
for taget[name[p]] in starred[call[name[chain], parameter[name[users], name[groups]]]] begin[:]
call[name[self]._set_role_cache, parameter[name[p], dictionary[[], []]]]
variable[filter_cond] assign[=] list[[]]
if name[users] begin[:]
call[name[filter_cond].append, parameter[call[name[RoleAssignment].user_id.in_, parameter[<ast.GeneratorExp object at 0x7da1b196f880>]]]]
if name[groups] begin[:]
call[name[filter_cond].append, parameter[call[name[RoleAssignment].group_id.in_, parameter[<ast.GeneratorExp object at 0x7da1b196c880>]]]]
variable[query] assign[=] call[name[query].filter, parameter[call[name[sql].or_, parameter[<ast.Starred object at 0x7da1b196d7b0>]]]]
variable[ra_users] assign[=] dictionary[[], []]
variable[ra_groups] assign[=] dictionary[[], []]
for taget[name[ra]] in starred[call[name[query].all, parameter[]]] begin[:]
if name[ra].user begin[:]
variable[all_roles] assign[=] call[name[ra_users].setdefault, parameter[name[ra].user, dictionary[[], []]]]
variable[object_key] assign[=] <ast.IfExp object at 0x7da204963dc0>
call[call[name[all_roles].setdefault, parameter[name[object_key], call[name[set], parameter[]]]].add, parameter[name[ra].role]]
for taget[tuple[[<ast.Name object at 0x7da2049630a0>, <ast.Name object at 0x7da204962cb0>]]] in starred[call[name[ra_groups].items, parameter[]]] begin[:]
call[name[self]._set_role_cache, parameter[name[group], name[all_roles]]]
for taget[tuple[[<ast.Name object at 0x7da2049615a0>, <ast.Name object at 0x7da204962110>]]] in starred[call[name[ra_users].items, parameter[]]] begin[:]
for taget[name[gr]] in starred[name[user].groups] begin[:]
variable[group_roles] assign[=] call[name[self]._fill_role_cache, parameter[name[gr]]]
for taget[tuple[[<ast.Name object at 0x7da2046236d0>, <ast.Name object at 0x7da204620940>]]] in starred[call[name[group_roles].items, parameter[]]] begin[:]
variable[obj_roles] assign[=] call[name[all_roles].setdefault, parameter[name[object_key], call[name[set], parameter[]]]]
<ast.AugAssign object at 0x7da2046238b0>
call[name[self]._set_role_cache, parameter[name[user], name[all_roles]]] | keyword[def] identifier[_fill_role_cache_batch] ( identifier[self] , identifier[principals] , identifier[overwrite] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[app_state] . identifier[use_cache] :
keyword[return]
identifier[query] = identifier[db] . identifier[session] . identifier[query] ( identifier[RoleAssignment] )
identifier[users] ={ identifier[u] keyword[for] identifier[u] keyword[in] identifier[principals] keyword[if] identifier[isinstance] ( identifier[u] , identifier[User] )}
identifier[groups] ={ identifier[g] keyword[for] identifier[g] keyword[in] identifier[principals] keyword[if] identifier[isinstance] ( identifier[g] , identifier[Group] )}
identifier[groups] |={ identifier[g] keyword[for] identifier[u] keyword[in] identifier[users] keyword[for] identifier[g] keyword[in] identifier[u] . identifier[groups] }
keyword[if] keyword[not] identifier[overwrite] :
identifier[users] ={ identifier[u] keyword[for] identifier[u] keyword[in] identifier[users] keyword[if] keyword[not] identifier[self] . identifier[_has_role_cache] ( identifier[u] )}
identifier[groups] ={ identifier[g] keyword[for] identifier[g] keyword[in] identifier[groups] keyword[if] keyword[not] identifier[self] . identifier[_has_role_cache] ( identifier[g] )}
keyword[if] keyword[not] ( identifier[users] keyword[or] identifier[groups] ):
keyword[return]
keyword[for] identifier[p] keyword[in] identifier[chain] ( identifier[users] , identifier[groups] ):
identifier[self] . identifier[_set_role_cache] ( identifier[p] ,{})
identifier[filter_cond] =[]
keyword[if] identifier[users] :
identifier[filter_cond] . identifier[append] ( identifier[RoleAssignment] . identifier[user_id] . identifier[in_] ( identifier[u] . identifier[id] keyword[for] identifier[u] keyword[in] identifier[users] ))
keyword[if] identifier[groups] :
identifier[filter_cond] . identifier[append] ( identifier[RoleAssignment] . identifier[group_id] . identifier[in_] ( identifier[g] . identifier[id] keyword[for] identifier[g] keyword[in] identifier[groups] ))
identifier[query] = identifier[query] . identifier[filter] ( identifier[sql] . identifier[or_] (* identifier[filter_cond] ))
identifier[ra_users] ={}
identifier[ra_groups] ={}
keyword[for] identifier[ra] keyword[in] identifier[query] . identifier[all] ():
keyword[if] identifier[ra] . identifier[user] :
identifier[all_roles] = identifier[ra_users] . identifier[setdefault] ( identifier[ra] . identifier[user] ,{})
keyword[else] :
identifier[all_roles] = identifier[ra_groups] . identifier[setdefault] ( identifier[ra] . identifier[group] ,{})
identifier[object_key] =(
literal[string]
keyword[if] identifier[ra] . identifier[object] keyword[is] keyword[not] keyword[None]
keyword[else] keyword[None]
)
identifier[all_roles] . identifier[setdefault] ( identifier[object_key] , identifier[set] ()). identifier[add] ( identifier[ra] . identifier[role] )
keyword[for] identifier[group] , identifier[all_roles] keyword[in] identifier[ra_groups] . identifier[items] ():
identifier[self] . identifier[_set_role_cache] ( identifier[group] , identifier[all_roles] )
keyword[for] identifier[user] , identifier[all_roles] keyword[in] identifier[ra_users] . identifier[items] ():
keyword[for] identifier[gr] keyword[in] identifier[user] . identifier[groups] :
identifier[group_roles] = identifier[self] . identifier[_fill_role_cache] ( identifier[gr] )
keyword[for] identifier[object_key] , identifier[roles] keyword[in] identifier[group_roles] . identifier[items] ():
identifier[obj_roles] = identifier[all_roles] . identifier[setdefault] ( identifier[object_key] , identifier[set] ())
identifier[obj_roles] |= identifier[roles]
identifier[self] . identifier[_set_role_cache] ( identifier[user] , identifier[all_roles] ) | def _fill_role_cache_batch(self, principals, overwrite=False):
"""Fill role cache for `principals` (Users and/or Groups), in order to
avoid too many queries when checking role access with 'has_role'."""
if not self.app_state.use_cache:
return # depends on [control=['if'], data=[]]
query = db.session.query(RoleAssignment)
users = {u for u in principals if isinstance(u, User)}
groups = {g for g in principals if isinstance(g, Group)}
groups |= {g for u in users for g in u.groups}
if not overwrite:
users = {u for u in users if not self._has_role_cache(u)}
groups = {g for g in groups if not self._has_role_cache(g)} # depends on [control=['if'], data=[]]
if not (users or groups):
return # depends on [control=['if'], data=[]]
# ensure principals processed here will have role cache. Thus users or
# groups without any role will have an empty role cache, to avoid
# unneeded individual DB query when calling self._fill_role_cache(p).
for p in chain(users, groups):
self._set_role_cache(p, {}) # depends on [control=['for'], data=['p']]
filter_cond = []
if users:
filter_cond.append(RoleAssignment.user_id.in_((u.id for u in users))) # depends on [control=['if'], data=[]]
if groups:
filter_cond.append(RoleAssignment.group_id.in_((g.id for g in groups))) # depends on [control=['if'], data=[]]
query = query.filter(sql.or_(*filter_cond))
ra_users = {}
ra_groups = {}
for ra in query.all():
if ra.user:
all_roles = ra_users.setdefault(ra.user, {}) # depends on [control=['if'], data=[]]
else:
all_roles = ra_groups.setdefault(ra.group, {})
object_key = f'{ra.object.entity_type}:{ra.object_id:d}' if ra.object is not None else None
all_roles.setdefault(object_key, set()).add(ra.role) # depends on [control=['for'], data=['ra']]
for (group, all_roles) in ra_groups.items():
self._set_role_cache(group, all_roles) # depends on [control=['for'], data=[]]
for (user, all_roles) in ra_users.items():
for gr in user.groups:
group_roles = self._fill_role_cache(gr)
for (object_key, roles) in group_roles.items():
obj_roles = all_roles.setdefault(object_key, set())
obj_roles |= roles # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['gr']]
self._set_role_cache(user, all_roles) # depends on [control=['for'], data=[]] |
def handle_common_arguments(parsed_args):
    """
    Handles the arguments defined by :meth:`~make_common_parser`
    :param parsed_args: Argument parsed with ``argparse`` (``Namespace``)
    :return: An :class:`~InitFileHandler` object
    :raise IOError: Initial or run script not found
    """
    # Logging verbosity follows the --verbose flag
    log_level = logging.DEBUG if parsed_args.verbose else logging.WARNING
    logging.basicConfig(level=log_level)

    # Framework properties dictionary
    framework_props = {}

    # Read the initial configuration script(s)
    init = InitFileHandler()
    if not parsed_args.init_empty:
        if not parsed_args.init_conf_exclusive:
            # Load default configuration
            init.load()

        # Load the given configuration file, if any (exclusive one wins)
        conf_file = parsed_args.init_conf_exclusive or parsed_args.init_conf
        if conf_file:
            init.load(conf_file)

        # Normalize configuration and use it as the initial properties
        init.normalize()
        framework_props.update(init.properties)

    # Properties given on the command line ("key=value") override the files
    for prop_def in parsed_args.properties or []:
        key, value = prop_def.split("=", 1)
        framework_props[key] = value

    # Check initial run script(s)
    if parsed_args.init_script:
        resolved = _resolve_file(parsed_args.init_script)
        framework_props[PROP_INIT_FILE] = resolved
        if not resolved:
            raise IOError(
                "Initial script file not found: {0}".format(
                    parsed_args.init_script
                )
            )

    if parsed_args.run_script:
        # Find the file
        resolved = _resolve_file(parsed_args.run_script)
        framework_props[PROP_RUN_FILE] = resolved
        if not resolved:
            raise IOError(
                "Script file not found: {0}".format(parsed_args.run_script)
            )

    # Update the stored configuration
    init.properties.update(framework_props)
    return init
constant[
Handles the arguments defined by :meth:`~make_common_parser`
:param parsed_args: Argument parsed with ``argparse`` (``Namespace``)
:return: An :class:`~InitFileHandler` object
:raise IOError: Initial or run script not found
]
call[name[logging].basicConfig, parameter[]]
variable[props] assign[=] dictionary[[], []]
variable[init] assign[=] call[name[InitFileHandler], parameter[]]
if <ast.UnaryOp object at 0x7da1b0473a00> begin[:]
if <ast.UnaryOp object at 0x7da1b0471d50> begin[:]
call[name[init].load, parameter[]]
variable[conf_file] assign[=] <ast.BoolOp object at 0x7da1b0472290>
if name[conf_file] begin[:]
call[name[init].load, parameter[name[conf_file]]]
call[name[init].normalize, parameter[]]
call[name[props].update, parameter[name[init].properties]]
for taget[name[prop_def]] in starred[<ast.BoolOp object at 0x7da1b04d5120>] begin[:]
<ast.Tuple object at 0x7da1b04d66b0> assign[=] call[name[prop_def].split, parameter[constant[=], constant[1]]]
call[name[props]][name[key]] assign[=] name[value]
if name[parsed_args].init_script begin[:]
variable[path] assign[=] call[name[_resolve_file], parameter[name[parsed_args].init_script]]
if <ast.UnaryOp object at 0x7da1b04d57e0> begin[:]
<ast.Raise object at 0x7da1b04d42b0>
if name[parsed_args].run_script begin[:]
variable[path] assign[=] call[name[_resolve_file], parameter[name[parsed_args].run_script]]
if <ast.UnaryOp object at 0x7da1b04d71f0> begin[:]
<ast.Raise object at 0x7da1b04d6950>
call[name[init].properties.update, parameter[name[props]]]
return[name[init]] | keyword[def] identifier[handle_common_arguments] ( identifier[parsed_args] ):
literal[string]
identifier[logging] . identifier[basicConfig] (
identifier[level] = identifier[logging] . identifier[DEBUG] keyword[if] identifier[parsed_args] . identifier[verbose] keyword[else] identifier[logging] . identifier[WARNING]
)
identifier[props] ={}
identifier[init] = identifier[InitFileHandler] ()
keyword[if] keyword[not] identifier[parsed_args] . identifier[init_empty] :
keyword[if] keyword[not] identifier[parsed_args] . identifier[init_conf_exclusive] :
identifier[init] . identifier[load] ()
identifier[conf_file] = identifier[parsed_args] . identifier[init_conf_exclusive] keyword[or] identifier[parsed_args] . identifier[init_conf]
keyword[if] identifier[conf_file] :
identifier[init] . identifier[load] ( identifier[conf_file] )
identifier[init] . identifier[normalize] ()
identifier[props] . identifier[update] ( identifier[init] . identifier[properties] )
keyword[for] identifier[prop_def] keyword[in] identifier[parsed_args] . identifier[properties] keyword[or] []:
identifier[key] , identifier[value] = identifier[prop_def] . identifier[split] ( literal[string] , literal[int] )
identifier[props] [ identifier[key] ]= identifier[value]
keyword[if] identifier[parsed_args] . identifier[init_script] :
identifier[path] = identifier[props] [ identifier[PROP_INIT_FILE] ]= identifier[_resolve_file] ( identifier[parsed_args] . identifier[init_script] )
keyword[if] keyword[not] identifier[path] :
keyword[raise] identifier[IOError] (
literal[string] . identifier[format] (
identifier[parsed_args] . identifier[init_script]
)
)
keyword[if] identifier[parsed_args] . identifier[run_script] :
identifier[path] = identifier[props] [ identifier[PROP_RUN_FILE] ]= identifier[_resolve_file] ( identifier[parsed_args] . identifier[run_script] )
keyword[if] keyword[not] identifier[path] :
keyword[raise] identifier[IOError] (
literal[string] . identifier[format] ( identifier[parsed_args] . identifier[run_script] )
)
identifier[init] . identifier[properties] . identifier[update] ( identifier[props] )
keyword[return] identifier[init] | def handle_common_arguments(parsed_args):
"""
Handles the arguments defined by :meth:`~make_common_parser`
:param parsed_args: Argument parsed with ``argparse`` (``Namespace``)
:return: An :class:`~InitFileHandler` object
:raise IOError: Initial or run script not found
"""
# Setup the logger
logging.basicConfig(level=logging.DEBUG if parsed_args.verbose else logging.WARNING)
# Framework properties dictionary
props = {}
# Read the initial configuration script
init = InitFileHandler()
if not parsed_args.init_empty:
if not parsed_args.init_conf_exclusive:
# Load default configuration
init.load() # depends on [control=['if'], data=[]]
# Load the given configuration file
conf_file = parsed_args.init_conf_exclusive or parsed_args.init_conf
if conf_file:
init.load(conf_file) # depends on [control=['if'], data=[]]
# Normalize configuration
init.normalize()
# Set initial framework properties
props.update(init.properties) # depends on [control=['if'], data=[]]
# Compute framework properties
for prop_def in parsed_args.properties or []:
(key, value) = prop_def.split('=', 1)
props[key] = value # depends on [control=['for'], data=['prop_def']]
# Check initial run script(s)
if parsed_args.init_script:
path = props[PROP_INIT_FILE] = _resolve_file(parsed_args.init_script)
if not path:
raise IOError('Initial script file not found: {0}'.format(parsed_args.init_script)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if parsed_args.run_script:
# Find the file
path = props[PROP_RUN_FILE] = _resolve_file(parsed_args.run_script)
if not path:
raise IOError('Script file not found: {0}'.format(parsed_args.run_script)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Update the stored configuration
init.properties.update(props)
return init |
def load(self, optional_cfg_files=None):
    """Actually load the configuration from either the default location or the
    given directory.
    """
    extra_files = optional_cfg_files or []

    # Guard against coding errors: loading twice would re-run side effects
    if self._loaded:
        raise RuntimeError("INTERNAL ERROR: Attempt to load configuration twice!")

    try:
        # Load configuration into a fresh namespace
        ns = {}
        self._set_defaults(ns, extra_files)
        self._load_ini(ns, os.path.join(self.config_dir, self.CONFIG_INI))
        for name in extra_files:
            # Relative paths are resolved against the configuration directory
            path = name if os.path.isabs(name) else os.path.join(self.config_dir, name)
            if os.path.exists(path):
                self._load_ini(ns, path)
        self._validate_namespace(ns)
        self._load_py(ns, ns["config_script"])
        self._validate_namespace(ns)
        for validator in ns["config_validator_callbacks"]:
            validator()
    except ConfigParser.ParsingError as exc:
        raise error.UserError(exc)

    # Ready to go...
    self._loaded = True
constant[ Actually load the configuation from either the default location or the given directory.
]
variable[optional_cfg_files] assign[=] <ast.BoolOp object at 0x7da1b135bf70>
if name[self]._loaded begin[:]
<ast.Raise object at 0x7da1b135bc70>
<ast.Try object at 0x7da1b135bca0>
name[self]._loaded assign[=] constant[True] | keyword[def] identifier[load] ( identifier[self] , identifier[optional_cfg_files] = keyword[None] ):
literal[string]
identifier[optional_cfg_files] = identifier[optional_cfg_files] keyword[or] []
keyword[if] identifier[self] . identifier[_loaded] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[try] :
identifier[namespace] ={}
identifier[self] . identifier[_set_defaults] ( identifier[namespace] , identifier[optional_cfg_files] )
identifier[self] . identifier[_load_ini] ( identifier[namespace] , identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[config_dir] , identifier[self] . identifier[CONFIG_INI] ))
keyword[for] identifier[cfg_file] keyword[in] identifier[optional_cfg_files] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isabs] ( identifier[cfg_file] ):
identifier[cfg_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[config_dir] , identifier[cfg_file] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[cfg_file] ):
identifier[self] . identifier[_load_ini] ( identifier[namespace] , identifier[cfg_file] )
identifier[self] . identifier[_validate_namespace] ( identifier[namespace] )
identifier[self] . identifier[_load_py] ( identifier[namespace] , identifier[namespace] [ literal[string] ])
identifier[self] . identifier[_validate_namespace] ( identifier[namespace] )
keyword[for] identifier[callback] keyword[in] identifier[namespace] [ literal[string] ]:
identifier[callback] ()
keyword[except] identifier[ConfigParser] . identifier[ParsingError] keyword[as] identifier[exc] :
keyword[raise] identifier[error] . identifier[UserError] ( identifier[exc] )
identifier[self] . identifier[_loaded] = keyword[True] | def load(self, optional_cfg_files=None):
""" Actually load the configuation from either the default location or the given directory.
"""
optional_cfg_files = optional_cfg_files or []
# Guard against coding errors
if self._loaded:
raise RuntimeError('INTERNAL ERROR: Attempt to load configuration twice!') # depends on [control=['if'], data=[]]
try:
# Load configuration
namespace = {}
self._set_defaults(namespace, optional_cfg_files)
self._load_ini(namespace, os.path.join(self.config_dir, self.CONFIG_INI))
for cfg_file in optional_cfg_files:
if not os.path.isabs(cfg_file):
cfg_file = os.path.join(self.config_dir, cfg_file) # depends on [control=['if'], data=[]]
if os.path.exists(cfg_file):
self._load_ini(namespace, cfg_file) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cfg_file']]
self._validate_namespace(namespace)
self._load_py(namespace, namespace['config_script'])
self._validate_namespace(namespace)
for callback in namespace['config_validator_callbacks']:
callback() # depends on [control=['for'], data=['callback']] # depends on [control=['try'], data=[]]
except ConfigParser.ParsingError as exc:
raise error.UserError(exc) # depends on [control=['except'], data=['exc']]
# Ready to go...
self._loaded = True |
def remove_event_subscriber(self, name, ws):
    """
    Remove a websocket subscriber from an event.
    name -- name of the event
    ws -- the websocket
    """
    event = self.available_events.get(name)
    if event is None:
        # Unknown event name: nothing to unsubscribe.
        return
    subscribers = event['subscribers']
    if ws in subscribers:
        subscribers.remove(ws)
constant[
Remove a websocket subscriber from an event.
name -- name of the event
ws -- the websocket
]
if <ast.BoolOp object at 0x7da1b0338d30> begin[:]
call[call[call[name[self].available_events][name[name]]][constant[subscribers]].remove, parameter[name[ws]]] | keyword[def] identifier[remove_event_subscriber] ( identifier[self] , identifier[name] , identifier[ws] ):
literal[string]
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[available_events] keyword[and] identifier[ws] keyword[in] identifier[self] . identifier[available_events] [ identifier[name] ][ literal[string] ]:
identifier[self] . identifier[available_events] [ identifier[name] ][ literal[string] ]. identifier[remove] ( identifier[ws] ) | def remove_event_subscriber(self, name, ws):
"""
Remove a websocket subscriber from an event.
name -- name of the event
ws -- the websocket
"""
if name in self.available_events and ws in self.available_events[name]['subscribers']:
self.available_events[name]['subscribers'].remove(ws) # depends on [control=['if'], data=[]] |
def __extract_modules(self, loader, name, is_pkg):
    """Load the named module and, when it exposes a ``__method__``
    attribute, register its routes on the application blueprint."""
    module = loader.find_module(name).load_module(name)
    # Only modules that declare a __method__ attribute carry routing
    # metadata; everything else is silently ignored.
    if hasattr(module, '__method__'):
        router = ModuleRouter(
            module,
            ignore_names=self.__serialize_module_paths(),
        ).register_route(app=self.application, name=name)
        self.__routers.extend(router.routers)
        self.__modules.append(module)
constant[ if module found load module and save all attributes in the module found ]
variable[mod] assign[=] call[call[name[loader].find_module, parameter[name[name]]].load_module, parameter[name[name]]]
constant[ find the attribute method on each module ]
if call[name[hasattr], parameter[name[mod], constant[__method__]]] begin[:]
constant[ register to the blueprint if method attribute found ]
variable[module_router] assign[=] call[call[name[ModuleRouter], parameter[name[mod]]].register_route, parameter[]]
call[name[self].__routers.extend, parameter[name[module_router].routers]]
call[name[self].__modules.append, parameter[name[mod]]] | keyword[def] identifier[__extract_modules] ( identifier[self] , identifier[loader] , identifier[name] , identifier[is_pkg] ):
literal[string]
identifier[mod] = identifier[loader] . identifier[find_module] ( identifier[name] ). identifier[load_module] ( identifier[name] )
literal[string]
keyword[if] identifier[hasattr] ( identifier[mod] , literal[string] ):
literal[string]
identifier[module_router] = identifier[ModuleRouter] ( identifier[mod] ,
identifier[ignore_names] = identifier[self] . identifier[__serialize_module_paths] ()
). identifier[register_route] ( identifier[app] = identifier[self] . identifier[application] , identifier[name] = identifier[name] )
identifier[self] . identifier[__routers] . identifier[extend] ( identifier[module_router] . identifier[routers] )
identifier[self] . identifier[__modules] . identifier[append] ( identifier[mod] )
keyword[else] :
literal[string]
keyword[pass] | def __extract_modules(self, loader, name, is_pkg):
""" if module found load module and save all attributes in the module found """
mod = loader.find_module(name).load_module(name)
' find the attribute method on each module '
if hasattr(mod, '__method__'):
' register to the blueprint if method attribute found '
module_router = ModuleRouter(mod, ignore_names=self.__serialize_module_paths()).register_route(app=self.application, name=name)
self.__routers.extend(module_router.routers)
self.__modules.append(mod) # depends on [control=['if'], data=[]]
else:
' prompt not found notification '
# print('{} has no module attribute method'.format(mod))
pass |
def _interp(self):
"""Interpolate the cartesian coordinates.
"""
if np.all(self.hrow_indices == self.row_indices):
return self._interp1d()
xpoints, ypoints = np.meshgrid(self.hrow_indices,
self.hcol_indices)
for num, data in enumerate(self.tie_data):
spl = RectBivariateSpline(self.row_indices,
self.col_indices,
data,
s=0,
kx=self.kx_,
ky=self.ky_)
new_data_ = spl.ev(xpoints.ravel(), ypoints.ravel())
self.new_data[num] = new_data_.reshape(xpoints.shape).T.copy(order='C') | def function[_interp, parameter[self]]:
constant[Interpolate the cartesian coordinates.
]
if call[name[np].all, parameter[compare[name[self].hrow_indices equal[==] name[self].row_indices]]] begin[:]
return[call[name[self]._interp1d, parameter[]]]
<ast.Tuple object at 0x7da1b1c7d720> assign[=] call[name[np].meshgrid, parameter[name[self].hrow_indices, name[self].hcol_indices]]
for taget[tuple[[<ast.Name object at 0x7da1b1c7ca30>, <ast.Name object at 0x7da1b1c7c970>]]] in starred[call[name[enumerate], parameter[name[self].tie_data]]] begin[:]
variable[spl] assign[=] call[name[RectBivariateSpline], parameter[name[self].row_indices, name[self].col_indices, name[data]]]
variable[new_data_] assign[=] call[name[spl].ev, parameter[call[name[xpoints].ravel, parameter[]], call[name[ypoints].ravel, parameter[]]]]
call[name[self].new_data][name[num]] assign[=] call[call[name[new_data_].reshape, parameter[name[xpoints].shape]].T.copy, parameter[]] | keyword[def] identifier[_interp] ( identifier[self] ):
literal[string]
keyword[if] identifier[np] . identifier[all] ( identifier[self] . identifier[hrow_indices] == identifier[self] . identifier[row_indices] ):
keyword[return] identifier[self] . identifier[_interp1d] ()
identifier[xpoints] , identifier[ypoints] = identifier[np] . identifier[meshgrid] ( identifier[self] . identifier[hrow_indices] ,
identifier[self] . identifier[hcol_indices] )
keyword[for] identifier[num] , identifier[data] keyword[in] identifier[enumerate] ( identifier[self] . identifier[tie_data] ):
identifier[spl] = identifier[RectBivariateSpline] ( identifier[self] . identifier[row_indices] ,
identifier[self] . identifier[col_indices] ,
identifier[data] ,
identifier[s] = literal[int] ,
identifier[kx] = identifier[self] . identifier[kx_] ,
identifier[ky] = identifier[self] . identifier[ky_] )
identifier[new_data_] = identifier[spl] . identifier[ev] ( identifier[xpoints] . identifier[ravel] (), identifier[ypoints] . identifier[ravel] ())
identifier[self] . identifier[new_data] [ identifier[num] ]= identifier[new_data_] . identifier[reshape] ( identifier[xpoints] . identifier[shape] ). identifier[T] . identifier[copy] ( identifier[order] = literal[string] ) | def _interp(self):
"""Interpolate the cartesian coordinates.
"""
if np.all(self.hrow_indices == self.row_indices):
return self._interp1d() # depends on [control=['if'], data=[]]
(xpoints, ypoints) = np.meshgrid(self.hrow_indices, self.hcol_indices)
for (num, data) in enumerate(self.tie_data):
spl = RectBivariateSpline(self.row_indices, self.col_indices, data, s=0, kx=self.kx_, ky=self.ky_)
new_data_ = spl.ev(xpoints.ravel(), ypoints.ravel())
self.new_data[num] = new_data_.reshape(xpoints.shape).T.copy(order='C') # depends on [control=['for'], data=[]] |
def enter(self, screen_id, y, x, default_action, allowed_actions, formats):
    """Inform the drop target about a drag-and-drop "enter" event.

    :param screen_id: int -- the screen ID where the drag and drop event occurred.
    :param y: int -- Y-position of the event.
    :param x: int -- X-position of the event.
    :param default_action: DnDAction -- the default action to use.
    :param allowed_actions: list of DnDAction -- the actions which are allowed.
    :param formats: list of str -- the supported MIME types.
    :returns: DnDAction -- the resulting action of this event.
    :raises VBoxErrorVmError: VMM device is not available.
    """
    if not isinstance(screen_id, baseinteger):
        raise TypeError("screen_id can only be an instance of type baseinteger")
    if not isinstance(y, baseinteger):
        raise TypeError("y can only be an instance of type baseinteger")
    if not isinstance(x, baseinteger):
        raise TypeError("x can only be an instance of type baseinteger")
    if not isinstance(default_action, DnDAction):
        raise TypeError("default_action can only be an instance of type DnDAction")
    if not isinstance(allowed_actions, list):
        raise TypeError("allowed_actions can only be an instance of type list")
    # Only the first ten entries of each array are type-checked.
    for action in allowed_actions[:10]:
        if not isinstance(action, DnDAction):
            raise TypeError("array can only contain objects of type DnDAction")
    if not isinstance(formats, list):
        raise TypeError("formats can only be an instance of type list")
    for fmt in formats[:10]:
        if not isinstance(fmt, basestring):
            raise TypeError("array can only contain objects of type basestring")
    raw_result = self._call("enter",
                            in_p=[screen_id, y, x, default_action, allowed_actions, formats])
    return DnDAction(raw_result)
constant[Informs the target about a drag and drop enter event.
in screen_id of type int
The screen ID where the drag and drop event occurred.
in y of type int
Y-position of the event.
in x of type int
X-position of the event.
in default_action of type :class:`DnDAction`
The default action to use.
in allowed_actions of type :class:`DnDAction`
The actions which are allowed.
in formats of type str
The supported MIME types.
return result_action of type :class:`DnDAction`
The resulting action of this event.
raises :class:`VBoxErrorVmError`
VMM device is not available.
]
if <ast.UnaryOp object at 0x7da2044c35e0> begin[:]
<ast.Raise object at 0x7da2044c2c20>
if <ast.UnaryOp object at 0x7da2044c0340> begin[:]
<ast.Raise object at 0x7da2044c1b70>
if <ast.UnaryOp object at 0x7da2044c0430> begin[:]
<ast.Raise object at 0x7da2044c3fd0>
if <ast.UnaryOp object at 0x7da2044c0dc0> begin[:]
<ast.Raise object at 0x7da2044c01f0>
if <ast.UnaryOp object at 0x7da2044c2500> begin[:]
<ast.Raise object at 0x7da2044c3670>
for taget[name[a]] in starred[call[name[allowed_actions]][<ast.Slice object at 0x7da2044c1720>]] begin[:]
if <ast.UnaryOp object at 0x7da2044c0400> begin[:]
<ast.Raise object at 0x7da2044c3d30>
if <ast.UnaryOp object at 0x7da2044c2c50> begin[:]
<ast.Raise object at 0x7da2044c21a0>
for taget[name[a]] in starred[call[name[formats]][<ast.Slice object at 0x7da2044c0b80>]] begin[:]
if <ast.UnaryOp object at 0x7da2044c27d0> begin[:]
<ast.Raise object at 0x7da2044c2620>
variable[result_action] assign[=] call[name[self]._call, parameter[constant[enter]]]
variable[result_action] assign[=] call[name[DnDAction], parameter[name[result_action]]]
return[name[result_action]] | keyword[def] identifier[enter] ( identifier[self] , identifier[screen_id] , identifier[y] , identifier[x] , identifier[default_action] , identifier[allowed_actions] , identifier[formats] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[screen_id] , identifier[baseinteger] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[y] , identifier[baseinteger] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[x] , identifier[baseinteger] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[default_action] , identifier[DnDAction] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[allowed_actions] , identifier[list] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[for] identifier[a] keyword[in] identifier[allowed_actions] [: literal[int] ]:
keyword[if] keyword[not] identifier[isinstance] ( identifier[a] , identifier[DnDAction] ):
keyword[raise] identifier[TypeError] (
literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[formats] , identifier[list] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[for] identifier[a] keyword[in] identifier[formats] [: literal[int] ]:
keyword[if] keyword[not] identifier[isinstance] ( identifier[a] , identifier[basestring] ):
keyword[raise] identifier[TypeError] (
literal[string] )
identifier[result_action] = identifier[self] . identifier[_call] ( literal[string] ,
identifier[in_p] =[ identifier[screen_id] , identifier[y] , identifier[x] , identifier[default_action] , identifier[allowed_actions] , identifier[formats] ])
identifier[result_action] = identifier[DnDAction] ( identifier[result_action] )
keyword[return] identifier[result_action] | def enter(self, screen_id, y, x, default_action, allowed_actions, formats):
"""Informs the target about a drag and drop enter event.
in screen_id of type int
The screen ID where the drag and drop event occurred.
in y of type int
Y-position of the event.
in x of type int
X-position of the event.
in default_action of type :class:`DnDAction`
The default action to use.
in allowed_actions of type :class:`DnDAction`
The actions which are allowed.
in formats of type str
The supported MIME types.
return result_action of type :class:`DnDAction`
The resulting action of this event.
raises :class:`VBoxErrorVmError`
VMM device is not available.
"""
if not isinstance(screen_id, baseinteger):
raise TypeError('screen_id can only be an instance of type baseinteger') # depends on [control=['if'], data=[]]
if not isinstance(y, baseinteger):
raise TypeError('y can only be an instance of type baseinteger') # depends on [control=['if'], data=[]]
if not isinstance(x, baseinteger):
raise TypeError('x can only be an instance of type baseinteger') # depends on [control=['if'], data=[]]
if not isinstance(default_action, DnDAction):
raise TypeError('default_action can only be an instance of type DnDAction') # depends on [control=['if'], data=[]]
if not isinstance(allowed_actions, list):
raise TypeError('allowed_actions can only be an instance of type list') # depends on [control=['if'], data=[]]
for a in allowed_actions[:10]:
if not isinstance(a, DnDAction):
raise TypeError('array can only contain objects of type DnDAction') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
if not isinstance(formats, list):
raise TypeError('formats can only be an instance of type list') # depends on [control=['if'], data=[]]
for a in formats[:10]:
if not isinstance(a, basestring):
raise TypeError('array can only contain objects of type basestring') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
result_action = self._call('enter', in_p=[screen_id, y, x, default_action, allowed_actions, formats])
result_action = DnDAction(result_action)
return result_action |
def dump_rexobj_results(rexobj, options=None):
    """Print a human-readable summary of all matches held by *rexobj*.

    ``options`` is accepted for interface compatibility but is unused.
    """
    print("-" * 60)
    print("Match count: ", rexobj.res_count)
    for m in rexobj.matches:
        print("Loc:", m.loc, ":: ")
        for key, value in m.named_groups.items():
            print("%s: %s" % (key, value))
        # Blank line separates consecutive matches.
        print("")
constant[
print all the results.
]
call[name[print], parameter[binary_operation[constant[-] * constant[60]]]]
call[name[print], parameter[constant[Match count: ], name[rexobj].res_count]]
variable[matches] assign[=] name[rexobj].matches
for taget[name[match]] in starred[name[matches]] begin[:]
call[name[print], parameter[constant[Loc:], name[match].loc, constant[:: ]]]
for taget[name[key]] in starred[call[name[match].named_groups.keys, parameter[]]] begin[:]
call[name[print], parameter[binary_operation[constant[%s: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2581cc0>, <ast.Subscript object at 0x7da1b2583eb0>]]]]]
call[name[print], parameter[constant[]]] | keyword[def] identifier[dump_rexobj_results] ( identifier[rexobj] , identifier[options] = keyword[None] ):
literal[string]
identifier[print] ( literal[string] * literal[int] )
identifier[print] ( literal[string] , identifier[rexobj] . identifier[res_count] )
identifier[matches] = identifier[rexobj] . identifier[matches]
keyword[for] identifier[match] keyword[in] identifier[matches] :
identifier[print] ( literal[string] , identifier[match] . identifier[loc] , literal[string] )
keyword[for] identifier[key] keyword[in] identifier[match] . identifier[named_groups] . identifier[keys] ():
identifier[print] ( literal[string] %
( identifier[key] , identifier[match] . identifier[named_groups] [ identifier[key] ]))
identifier[print] ( literal[string] ) | def dump_rexobj_results(rexobj, options=None):
"""
print all the results.
"""
print('-' * 60)
print('Match count: ', rexobj.res_count)
matches = rexobj.matches
for match in matches:
print('Loc:', match.loc, ':: ')
for key in match.named_groups.keys():
print('%s: %s' % (key, match.named_groups[key])) # depends on [control=['for'], data=['key']]
print('') # depends on [control=['for'], data=['match']] |
def get_samples(in_file):
    """Retrieve the sample names present in a VCF file.

    Reads the ``#CHROM`` header line and returns the sample columns
    (everything after the nine fixed VCF columns). Raises ValueError
    when no header line is found.
    """
    with utils.open_gzipsafe(in_file) as handle:
        for raw_line in handle:
            if raw_line.startswith("#CHROM"):
                columns = raw_line.strip().split("\t")
                return columns[9:]
    raise ValueError("Did not find sample header in VCF file %s" % in_file)
constant[Retrieve samples present in a VCF file
]
with call[name[utils].open_gzipsafe, parameter[name[in_file]]] begin[:]
for taget[name[line]] in starred[name[in_handle]] begin[:]
if call[name[line].startswith, parameter[constant[#CHROM]]] begin[:]
variable[parts] assign[=] call[call[name[line].strip, parameter[]].split, parameter[constant[ ]]]
return[call[name[parts]][<ast.Slice object at 0x7da18f09dc90>]]
<ast.Raise object at 0x7da18f09f730> | keyword[def] identifier[get_samples] ( identifier[in_file] ):
literal[string]
keyword[with] identifier[utils] . identifier[open_gzipsafe] ( identifier[in_file] ) keyword[as] identifier[in_handle] :
keyword[for] identifier[line] keyword[in] identifier[in_handle] :
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[parts] = identifier[line] . identifier[strip] (). identifier[split] ( literal[string] )
keyword[return] identifier[parts] [ literal[int] :]
keyword[raise] identifier[ValueError] ( literal[string] % identifier[in_file] ) | def get_samples(in_file):
"""Retrieve samples present in a VCF file
"""
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if line.startswith('#CHROM'):
parts = line.strip().split('\t')
return parts[9:] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['in_handle']]
raise ValueError('Did not find sample header in VCF file %s' % in_file) |
def _update_function_transition_graph(self, src_node_key, dst_node_key, jumpkind='Ijk_Boring', ins_addr=None,
                                      stmt_idx=None, confirmed=None):
    """
    Update transition graphs of functions in the function manager based on the information passed in.

    :param src_node_key:  Key of the source CFGNode in the graph, or None when only the destination is known.
    :param dst_node_key:  Key of the destination CFGNode in the graph, or None (e.g. an unresolved target).
    :param str jumpkind:  Jumpkind of the transition (e.g. "Ijk_Call", "Ijk_Ret", "Ijk_Boring").
    :param ins_addr:      Address of the instruction the transition originates from, if available.
    :param stmt_idx:      Statement index the transition originates from, if available.
    :param confirmed:     For "Ijk_FakeRet" edges only: whether the fake-return edge is confirmed.
    :return: None
    """
    # Resolve the destination node (if any) up front and cache the attributes used below.
    if dst_node_key is not None:
        dst_node = self._graph_get_node(dst_node_key, terminator_for_nonexistent_node=True)
        dst_node_addr = dst_node.addr
        dst_codenode = dst_node.to_codenode()
        dst_node_func_addr = dst_node.function_address
    else:
        dst_node = None
        dst_node_addr = None
        dst_codenode = None
        dst_node_func_addr = None
    # Without a source node, all we can do is register the destination with its function.
    if src_node_key is None:
        if dst_node is None:
            raise ValueError("Either src_node_key or dst_node_key must be specified.")
        self.kb.functions.function(dst_node.function_address, create=True)._register_nodes(True,
                                                                                           dst_codenode
                                                                                           )
        return
    src_node = self._graph_get_node(src_node_key, terminator_for_nonexistent_node=True)
    # Update the transition graph of current function
    if jumpkind == "Ijk_Call":
        # The theoretical return site is the block the call would fall through to, when known.
        ret_addr = src_node.return_target
        ret_node = self.kb.functions.function(
            src_node.function_address,
            create=True
        )._get_block(ret_addr).codenode if ret_addr else None
        self.kb.functions._add_call_to(
            function_addr=src_node.function_address,
            from_node=src_node.to_codenode(),
            to_addr=dst_node_addr,
            retn_node=ret_node,
            syscall=False,
            ins_addr=ins_addr,
            stmt_idx=stmt_idx,
        )
    if jumpkind.startswith('Ijk_Sys'):
        self.kb.functions._add_call_to(
            function_addr=src_node.function_address,
            from_node=src_node.to_codenode(),
            to_addr=dst_node_addr,
            retn_node=src_node.to_codenode(),  # For syscalls, they are returning to the address of themselves
            syscall=True,
            ins_addr=ins_addr,
            stmt_idx=stmt_idx,
        )
    elif jumpkind == 'Ijk_Ret':
        # Create a return site for current function
        self.kb.functions._add_return_from(
            function_addr=src_node.function_address,
            from_node=src_node.to_codenode(),
            to_node=dst_codenode,
        )
        if dst_node is not None:
            # Create a returning edge in the caller function
            self.kb.functions._add_return_from_call(
                function_addr=dst_node_func_addr,
                src_function_addr=src_node.function_address,
                to_node=dst_codenode,
            )
    elif jumpkind == 'Ijk_FakeRet':
        self.kb.functions._add_fakeret_to(
            function_addr=src_node.function_address,
            from_node=src_node.to_codenode(),
            to_node=dst_codenode,
            confirmed=confirmed,
        )
    elif jumpkind in ('Ijk_Boring', 'Ijk_InvalICache'):
        # Decide whether this edge leaves the current function (an "outside" transition).
        src_obj = self.project.loader.find_object_containing(src_node.addr)
        dest_obj = self.project.loader.find_object_containing(dst_node.addr) if dst_node is not None else None
        if src_obj is dest_obj:
            # Jump/branch within the same object. Might be an outside jump.
            to_outside = src_node.function_address != dst_node_func_addr
        else:
            # Jump/branch between different objects. Must be an outside jump.
            to_outside = True
        if not to_outside:
            self.kb.functions._add_transition_to(
                function_addr=src_node.function_address,
                from_node=src_node.to_codenode(),
                to_node=dst_codenode,
                ins_addr=ins_addr,
                stmt_idx=stmt_idx,
            )
        else:
            self.kb.functions._add_outside_transition_to(
                function_addr=src_node.function_address,
                from_node=src_node.to_codenode(),
                to_node=dst_codenode,
                to_function_addr=dst_node_func_addr,
                ins_addr=ins_addr,
                stmt_idx=stmt_idx,
            )
constant[
Update transition graphs of functions in function manager based on information passed in.
:param str jumpkind: Jumpkind.
:param CFGNode src_node: Source CFGNode
:param CFGNode dst_node: Destionation CFGNode
:param int ret_addr: The theoretical return address for calls
:return: None
]
if compare[name[dst_node_key] is_not constant[None]] begin[:]
variable[dst_node] assign[=] call[name[self]._graph_get_node, parameter[name[dst_node_key]]]
variable[dst_node_addr] assign[=] name[dst_node].addr
variable[dst_codenode] assign[=] call[name[dst_node].to_codenode, parameter[]]
variable[dst_node_func_addr] assign[=] name[dst_node].function_address
if compare[name[src_node_key] is constant[None]] begin[:]
if compare[name[dst_node] is constant[None]] begin[:]
<ast.Raise object at 0x7da18eb57fa0>
call[call[name[self].kb.functions.function, parameter[name[dst_node].function_address]]._register_nodes, parameter[constant[True], name[dst_codenode]]]
return[None]
variable[src_node] assign[=] call[name[self]._graph_get_node, parameter[name[src_node_key]]]
if compare[name[jumpkind] equal[==] constant[Ijk_Call]] begin[:]
variable[ret_addr] assign[=] name[src_node].return_target
variable[ret_node] assign[=] <ast.IfExp object at 0x7da18eb56cb0>
call[name[self].kb.functions._add_call_to, parameter[]]
if call[name[jumpkind].startswith, parameter[constant[Ijk_Sys]]] begin[:]
call[name[self].kb.functions._add_call_to, parameter[]] | keyword[def] identifier[_update_function_transition_graph] ( identifier[self] , identifier[src_node_key] , identifier[dst_node_key] , identifier[jumpkind] = literal[string] , identifier[ins_addr] = keyword[None] ,
identifier[stmt_idx] = keyword[None] , identifier[confirmed] = keyword[None] ):
literal[string]
keyword[if] identifier[dst_node_key] keyword[is] keyword[not] keyword[None] :
identifier[dst_node] = identifier[self] . identifier[_graph_get_node] ( identifier[dst_node_key] , identifier[terminator_for_nonexistent_node] = keyword[True] )
identifier[dst_node_addr] = identifier[dst_node] . identifier[addr]
identifier[dst_codenode] = identifier[dst_node] . identifier[to_codenode] ()
identifier[dst_node_func_addr] = identifier[dst_node] . identifier[function_address]
keyword[else] :
identifier[dst_node] = keyword[None]
identifier[dst_node_addr] = keyword[None]
identifier[dst_codenode] = keyword[None]
identifier[dst_node_func_addr] = keyword[None]
keyword[if] identifier[src_node_key] keyword[is] keyword[None] :
keyword[if] identifier[dst_node] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[kb] . identifier[functions] . identifier[function] ( identifier[dst_node] . identifier[function_address] , identifier[create] = keyword[True] ). identifier[_register_nodes] ( keyword[True] ,
identifier[dst_codenode]
)
keyword[return]
identifier[src_node] = identifier[self] . identifier[_graph_get_node] ( identifier[src_node_key] , identifier[terminator_for_nonexistent_node] = keyword[True] )
keyword[if] identifier[jumpkind] == literal[string] :
identifier[ret_addr] = identifier[src_node] . identifier[return_target]
identifier[ret_node] = identifier[self] . identifier[kb] . identifier[functions] . identifier[function] (
identifier[src_node] . identifier[function_address] ,
identifier[create] = keyword[True]
). identifier[_get_block] ( identifier[ret_addr] ). identifier[codenode] keyword[if] identifier[ret_addr] keyword[else] keyword[None]
identifier[self] . identifier[kb] . identifier[functions] . identifier[_add_call_to] (
identifier[function_addr] = identifier[src_node] . identifier[function_address] ,
identifier[from_node] = identifier[src_node] . identifier[to_codenode] (),
identifier[to_addr] = identifier[dst_node_addr] ,
identifier[retn_node] = identifier[ret_node] ,
identifier[syscall] = keyword[False] ,
identifier[ins_addr] = identifier[ins_addr] ,
identifier[stmt_idx] = identifier[stmt_idx] ,
)
keyword[if] identifier[jumpkind] . identifier[startswith] ( literal[string] ):
identifier[self] . identifier[kb] . identifier[functions] . identifier[_add_call_to] (
identifier[function_addr] = identifier[src_node] . identifier[function_address] ,
identifier[from_node] = identifier[src_node] . identifier[to_codenode] (),
identifier[to_addr] = identifier[dst_node_addr] ,
identifier[retn_node] = identifier[src_node] . identifier[to_codenode] (),
identifier[syscall] = keyword[True] ,
identifier[ins_addr] = identifier[ins_addr] ,
identifier[stmt_idx] = identifier[stmt_idx] ,
)
keyword[elif] identifier[jumpkind] == literal[string] :
identifier[self] . identifier[kb] . identifier[functions] . identifier[_add_return_from] (
identifier[function_addr] = identifier[src_node] . identifier[function_address] ,
identifier[from_node] = identifier[src_node] . identifier[to_codenode] (),
identifier[to_node] = identifier[dst_codenode] ,
)
keyword[if] identifier[dst_node] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[kb] . identifier[functions] . identifier[_add_return_from_call] (
identifier[function_addr] = identifier[dst_node_func_addr] ,
identifier[src_function_addr] = identifier[src_node] . identifier[function_address] ,
identifier[to_node] = identifier[dst_codenode] ,
)
keyword[elif] identifier[jumpkind] == literal[string] :
identifier[self] . identifier[kb] . identifier[functions] . identifier[_add_fakeret_to] (
identifier[function_addr] = identifier[src_node] . identifier[function_address] ,
identifier[from_node] = identifier[src_node] . identifier[to_codenode] (),
identifier[to_node] = identifier[dst_codenode] ,
identifier[confirmed] = identifier[confirmed] ,
)
keyword[elif] identifier[jumpkind] keyword[in] ( literal[string] , literal[string] ):
identifier[src_obj] = identifier[self] . identifier[project] . identifier[loader] . identifier[find_object_containing] ( identifier[src_node] . identifier[addr] )
identifier[dest_obj] = identifier[self] . identifier[project] . identifier[loader] . identifier[find_object_containing] ( identifier[dst_node] . identifier[addr] ) keyword[if] identifier[dst_node] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None]
keyword[if] identifier[src_obj] keyword[is] identifier[dest_obj] :
identifier[to_outside] = identifier[src_node] . identifier[function_address] != identifier[dst_node_func_addr]
keyword[else] :
identifier[to_outside] = keyword[True]
keyword[if] keyword[not] identifier[to_outside] :
identifier[self] . identifier[kb] . identifier[functions] . identifier[_add_transition_to] (
identifier[function_addr] = identifier[src_node] . identifier[function_address] ,
identifier[from_node] = identifier[src_node] . identifier[to_codenode] (),
identifier[to_node] = identifier[dst_codenode] ,
identifier[ins_addr] = identifier[ins_addr] ,
identifier[stmt_idx] = identifier[stmt_idx] ,
)
keyword[else] :
identifier[self] . identifier[kb] . identifier[functions] . identifier[_add_outside_transition_to] (
identifier[function_addr] = identifier[src_node] . identifier[function_address] ,
identifier[from_node] = identifier[src_node] . identifier[to_codenode] (),
identifier[to_node] = identifier[dst_codenode] ,
identifier[to_function_addr] = identifier[dst_node_func_addr] ,
identifier[ins_addr] = identifier[ins_addr] ,
identifier[stmt_idx] = identifier[stmt_idx] ,
) | def _update_function_transition_graph(self, src_node_key, dst_node_key, jumpkind='Ijk_Boring', ins_addr=None, stmt_idx=None, confirmed=None):
"""
Update transition graphs of functions in function manager based on information passed in.
:param str jumpkind: Jumpkind.
:param CFGNode src_node: Source CFGNode
:param CFGNode dst_node: Destionation CFGNode
:param int ret_addr: The theoretical return address for calls
:return: None
"""
if dst_node_key is not None:
dst_node = self._graph_get_node(dst_node_key, terminator_for_nonexistent_node=True)
dst_node_addr = dst_node.addr
dst_codenode = dst_node.to_codenode()
dst_node_func_addr = dst_node.function_address # depends on [control=['if'], data=['dst_node_key']]
else:
dst_node = None
dst_node_addr = None
dst_codenode = None
dst_node_func_addr = None
if src_node_key is None:
if dst_node is None:
raise ValueError('Either src_node_key or dst_node_key must be specified.') # depends on [control=['if'], data=[]]
self.kb.functions.function(dst_node.function_address, create=True)._register_nodes(True, dst_codenode)
return # depends on [control=['if'], data=[]]
src_node = self._graph_get_node(src_node_key, terminator_for_nonexistent_node=True)
# Update the transition graph of current function
if jumpkind == 'Ijk_Call':
ret_addr = src_node.return_target
ret_node = self.kb.functions.function(src_node.function_address, create=True)._get_block(ret_addr).codenode if ret_addr else None
self.kb.functions._add_call_to(function_addr=src_node.function_address, from_node=src_node.to_codenode(), to_addr=dst_node_addr, retn_node=ret_node, syscall=False, ins_addr=ins_addr, stmt_idx=stmt_idx) # depends on [control=['if'], data=[]]
if jumpkind.startswith('Ijk_Sys'): # For syscalls, they are returning to the address of themselves
self.kb.functions._add_call_to(function_addr=src_node.function_address, from_node=src_node.to_codenode(), to_addr=dst_node_addr, retn_node=src_node.to_codenode(), syscall=True, ins_addr=ins_addr, stmt_idx=stmt_idx) # depends on [control=['if'], data=[]]
elif jumpkind == 'Ijk_Ret':
# Create a return site for current function
self.kb.functions._add_return_from(function_addr=src_node.function_address, from_node=src_node.to_codenode(), to_node=dst_codenode)
if dst_node is not None:
# Create a returning edge in the caller function
self.kb.functions._add_return_from_call(function_addr=dst_node_func_addr, src_function_addr=src_node.function_address, to_node=dst_codenode) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif jumpkind == 'Ijk_FakeRet':
self.kb.functions._add_fakeret_to(function_addr=src_node.function_address, from_node=src_node.to_codenode(), to_node=dst_codenode, confirmed=confirmed) # depends on [control=['if'], data=[]]
elif jumpkind in ('Ijk_Boring', 'Ijk_InvalICache'):
src_obj = self.project.loader.find_object_containing(src_node.addr)
dest_obj = self.project.loader.find_object_containing(dst_node.addr) if dst_node is not None else None
if src_obj is dest_obj:
# Jump/branch within the same object. Might be an outside jump.
to_outside = src_node.function_address != dst_node_func_addr # depends on [control=['if'], data=[]]
else:
# Jump/branch between different objects. Must be an outside jump.
to_outside = True
if not to_outside:
self.kb.functions._add_transition_to(function_addr=src_node.function_address, from_node=src_node.to_codenode(), to_node=dst_codenode, ins_addr=ins_addr, stmt_idx=stmt_idx) # depends on [control=['if'], data=[]]
else:
self.kb.functions._add_outside_transition_to(function_addr=src_node.function_address, from_node=src_node.to_codenode(), to_node=dst_codenode, to_function_addr=dst_node_func_addr, ins_addr=ins_addr, stmt_idx=stmt_idx) # depends on [control=['if'], data=[]] |
def console_output(self, instance=None):
    """Yield console output and metadata for every finished job in the pipeline.

    Args:
        instance: The result of a :meth:`instance` call. If not supplied,
            the latest instance of the pipeline is fetched and used.

    Yields:
        tuple: ``(metadata (dict), output (str))`` where metadata contains:
            - pipeline
            - pipeline_counter
            - stage
            - stage_counter
            - job
            - job_result
    """
    if instance is None:
        instance = self.instance()

    pipeline_counter = instance['counter']
    for stage in instance['stages']:
        stage_name = stage['name']
        stage_counter = stage['counter']
        for job in stage['jobs']:
            job_result = job['result']
            # Jobs that have not reached a final result have no complete log yet.
            if job_result not in self.final_results:
                continue

            artifact = self.artifact(
                pipeline_counter,
                stage_name,
                job['name'],
                stage_counter,
            )
            log = artifact.get('cruise-output/console.log')

            metadata = {
                'pipeline': self.name,
                'pipeline_counter': pipeline_counter,
                'stage': stage_name,
                'stage_counter': stage_counter,
                'job': job['name'],
                'job_result': job_result,
            }
            yield metadata, log.body
constant[Yields the output and metadata from all jobs in the pipeline
Args:
instance: The result of a :meth:`instance` call, if not supplied
the latest of the pipeline will be used.
Yields:
tuple: (metadata (dict), output (str)).
metadata contains:
- pipeline
- pipeline_counter
- stage
- stage_counter
- job
- job_result
]
if compare[name[instance] is constant[None]] begin[:]
variable[instance] assign[=] call[name[self].instance, parameter[]]
for taget[name[stage]] in starred[call[name[instance]][constant[stages]]] begin[:]
for taget[name[job]] in starred[call[name[stage]][constant[jobs]]] begin[:]
if compare[call[name[job]][constant[result]] <ast.NotIn object at 0x7da2590d7190> name[self].final_results] begin[:]
continue
variable[artifact] assign[=] call[name[self].artifact, parameter[call[name[instance]][constant[counter]], call[name[stage]][constant[name]], call[name[job]][constant[name]], call[name[stage]][constant[counter]]]]
variable[output] assign[=] call[name[artifact].get, parameter[constant[cruise-output/console.log]]]
<ast.Yield object at 0x7da1afe71690> | keyword[def] identifier[console_output] ( identifier[self] , identifier[instance] = keyword[None] ):
literal[string]
keyword[if] identifier[instance] keyword[is] keyword[None] :
identifier[instance] = identifier[self] . identifier[instance] ()
keyword[for] identifier[stage] keyword[in] identifier[instance] [ literal[string] ]:
keyword[for] identifier[job] keyword[in] identifier[stage] [ literal[string] ]:
keyword[if] identifier[job] [ literal[string] ] keyword[not] keyword[in] identifier[self] . identifier[final_results] :
keyword[continue]
identifier[artifact] = identifier[self] . identifier[artifact] (
identifier[instance] [ literal[string] ],
identifier[stage] [ literal[string] ],
identifier[job] [ literal[string] ],
identifier[stage] [ literal[string] ]
)
identifier[output] = identifier[artifact] . identifier[get] ( literal[string] )
keyword[yield] (
{
literal[string] : identifier[self] . identifier[name] ,
literal[string] : identifier[instance] [ literal[string] ],
literal[string] : identifier[stage] [ literal[string] ],
literal[string] : identifier[stage] [ literal[string] ],
literal[string] : identifier[job] [ literal[string] ],
literal[string] : identifier[job] [ literal[string] ],
},
identifier[output] . identifier[body]
) | def console_output(self, instance=None):
"""Yields the output and metadata from all jobs in the pipeline
Args:
instance: The result of a :meth:`instance` call, if not supplied
the latest of the pipeline will be used.
Yields:
tuple: (metadata (dict), output (str)).
metadata contains:
- pipeline
- pipeline_counter
- stage
- stage_counter
- job
- job_result
"""
if instance is None:
instance = self.instance() # depends on [control=['if'], data=['instance']]
for stage in instance['stages']:
for job in stage['jobs']:
if job['result'] not in self.final_results:
continue # depends on [control=['if'], data=[]]
artifact = self.artifact(instance['counter'], stage['name'], job['name'], stage['counter'])
output = artifact.get('cruise-output/console.log')
yield ({'pipeline': self.name, 'pipeline_counter': instance['counter'], 'stage': stage['name'], 'stage_counter': stage['counter'], 'job': job['name'], 'job_result': job['result']}, output.body) # depends on [control=['for'], data=['job']] # depends on [control=['for'], data=['stage']] |
def _scaleSinglePoint(point, scale=1, convertToInteger=True):
"""
Scale a single point
"""
x, y = point
if convertToInteger:
return int(round(x * scale)), int(round(y * scale))
else:
return (x * scale, y * scale) | def function[_scaleSinglePoint, parameter[point, scale, convertToInteger]]:
constant[
Scale a single point
]
<ast.Tuple object at 0x7da1b0f3a200> assign[=] name[point]
if name[convertToInteger] begin[:]
return[tuple[[<ast.Call object at 0x7da1b0f3ad10>, <ast.Call object at 0x7da1b0f3a8c0>]]] | keyword[def] identifier[_scaleSinglePoint] ( identifier[point] , identifier[scale] = literal[int] , identifier[convertToInteger] = keyword[True] ):
literal[string]
identifier[x] , identifier[y] = identifier[point]
keyword[if] identifier[convertToInteger] :
keyword[return] identifier[int] ( identifier[round] ( identifier[x] * identifier[scale] )), identifier[int] ( identifier[round] ( identifier[y] * identifier[scale] ))
keyword[else] :
keyword[return] ( identifier[x] * identifier[scale] , identifier[y] * identifier[scale] ) | def _scaleSinglePoint(point, scale=1, convertToInteger=True):
"""
Scale a single point
"""
(x, y) = point
if convertToInteger:
return (int(round(x * scale)), int(round(y * scale))) # depends on [control=['if'], data=[]]
else:
return (x * scale, y * scale) |
def query(self, coords, mode='random_sample'):
    """Return monochromatic extinction, A0, at the given coordinates, in mags.

    There are several different query modes, which handle the probabilistic
    nature of the map differently.

    Args:
        coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
        mode (Optional[:obj:`str`]): Five query modes are available:
            ``'random_sample'``: at each coordinate/distance, a random sample
            of reddening is returned.
            ``'random_sample_per_pix'``: as above, but the sample chosen is
            consistent per angular map pixel, so two queries falling in the
            same pixel draw the same sample.
            ``'median'`` / ``'mean'``: the median / mean reddening over all
            samples.
            ``'samples'``: all samples are returned.

    Returns:
        Monochromatic extinction, A0, in mags. If ``coords`` does not specify
        distance(s), the output shape begins with ``coords.shape``; otherwise
        it begins with ``coords.shape + ([number of distance bins],)``.
        Out-of-bounds coordinates are returned as NaN.

    Raises:
        ValueError: If ``mode`` is not one of the supported query modes.
    """
    # Check that the query mode is supported
    valid_modes = [
        'random_sample',
        'random_sample_per_pix',
        'samples',
        'median',
        'mean']

    if mode not in valid_modes:
        raise ValueError(
            '"{}" is not a valid `mode`. Valid modes are:\n'
            '  {}'.format(mode, valid_modes))

    n_coords_ret = coords.shape[0]

    # Determine if distance has been requested
    has_dist = hasattr(coords.distance, 'kpc')
    d = coords.distance.kpc if has_dist else None

    # Convert coordinates to pixel indices
    pix_idx = self._coords2idx(coords)

    # Determine which coordinates are out of bounds; clamp them to pixel 0
    # for now and overwrite the result with NaN at the end.
    mask_idx = (pix_idx == self._n_pix)
    if np.any(mask_idx):
        pix_idx[mask_idx] = 0

    # Which samples to extract
    if mode == 'random_sample':
        samp_idx = np.random.randint(0, self._n_samples, pix_idx.size)
        n_samp_ret = 1
    elif mode == 'random_sample_per_pix':
        # One fixed sample index per map pixel, broadcast to the queries.
        samp_idx = np.random.randint(0, self._n_samples, self._n_pix)[pix_idx]
        # BUGFIX: this assignment previously read `n_sample_ret = 1` (typo),
        # leaving `n_samp_ret` unbound in this mode. Harmless today only
        # because this mode never takes the slice branch below.
        n_samp_ret = 1
    else:
        samp_idx = slice(None)
        n_samp_ret = self._n_samples

    # Which distances to extract
    if has_dist:
        # NOTE(review): the map distance grid appears to be in pc here,
        # while the docstring-era code also touched .kpc above -- confirm units.
        d = coords.distance.pc
        dist_idx_ceil = np.searchsorted(self._dists, d)

        if isinstance(samp_idx, slice):
            ret = np.empty((n_coords_ret, n_samp_ret), dtype='f4')
        else:
            ret = np.empty((n_coords_ret,), dtype='f4')

        # d < d(nearest distance slice): scale the nearest slice linearly
        # down toward zero extinction at zero distance.
        idx_near = (dist_idx_ceil == 0)
        if np.any(idx_near):
            a = d[idx_near] / self._dists[0]
            if isinstance(samp_idx, slice):
                ret[idx_near] = a[:,None] * self._data['A0'][pix_idx[idx_near], 0, samp_idx]
            else:
                ret[idx_near] = a[:] * self._data['A0'][pix_idx[idx_near], 0, samp_idx[idx_near]]

        # d > d(farthest distance slice): clamp to the farthest slice.
        idx_far = (dist_idx_ceil == self._n_dists)
        if np.any(idx_far):
            if isinstance(samp_idx, slice):
                ret[idx_far] = self._data['A0'][pix_idx[idx_far], -1, samp_idx]
            else:
                ret[idx_far] = self._data['A0'][pix_idx[idx_far], -1, samp_idx[idx_far]]

        # d(nearest distance slice) < d < d(farthest distance slice):
        # interpolate linearly between the two bracketing slices.
        idx_btw = ~idx_near & ~idx_far
        if np.any(idx_btw):
            d_ceil = self._dists[dist_idx_ceil[idx_btw]]
            d_floor = self._dists[dist_idx_ceil[idx_btw]-1]
            a = (d_ceil - d[idx_btw]) / (d_ceil - d_floor)
            if isinstance(samp_idx, slice):
                ret[idx_btw] = (
                    (1.-a[:,None]) * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw], samp_idx]
                    + a[:,None] * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw]-1, samp_idx])
            else:
                ret[idx_btw] = (
                    (1.-a[:]) * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw], samp_idx[idx_btw]]
                    + a[:] * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw]-1, samp_idx[idx_btw]])
    else:
        # TODO: Harmonize order of distances & samples with Bayestar.
        ret = self._data['A0'][pix_idx, :, samp_idx]

    # Reduce the samples in the requested manner
    samp_axis = 1 if has_dist else 2

    if mode == 'median':
        ret = np.median(ret, axis=samp_axis)
    elif mode == 'mean':
        ret = np.mean(ret, axis=samp_axis)

    # Flag out-of-bounds coordinates with NaN
    if np.any(mask_idx):
        ret[mask_idx] = np.nan

    return ret
constant[
Returns A0 at the given coordinates. There are several different query
modes, which handle the probabilistic nature of the map differently.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
mode (Optional[:obj:`str`]): Five different query modes are available:
``'random_sample'``, ``'random_sample_per_pix'``, ``'samples'``,
``'median'`` and ``'mean'``. The ``mode`` determines how the output
will reflect the probabilistic nature of the IPHAS dust map.
Returns:
Monochromatic extinction, A0, at the specified coordinates, in mags.
The shape of the output depends on the ``mode``, and on whether
``coords`` contains distances.
If ``coords`` does not specify distance(s), then the shape of the
output begins with `coords.shape`. If `coords` does specify
distance(s), then the shape of the output begins with
``coords.shape + ([number of distance bins],)``.
If ``mode`` is ``'random_sample'``, then at each coordinate/distance, a
random sample of reddening is given.
If ``mode`` is ``'random_sample_per_pix'``, then the sample chosen for
each angular pixel of the map will be consistent. For example, if
two query coordinates lie in the same map pixel, then the same
random sample will be chosen from the map for both query
coordinates.
If ``mode`` is ``'median'``, then at each coordinate/distance, the
median reddening is returned.
If ``mode`` is ``'mean'``, then at each coordinate/distance, the mean
reddening is returned.
Finally, if ``mode`` is ``'samples'``, then all at each
coordinate/distance, all samples are returned.
]
variable[valid_modes] assign[=] list[[<ast.Constant object at 0x7da1b26f3610>, <ast.Constant object at 0x7da1b26f0940>, <ast.Constant object at 0x7da1b26f3160>, <ast.Constant object at 0x7da1b26f31c0>, <ast.Constant object at 0x7da1b26f3220>]]
if compare[name[mode] <ast.NotIn object at 0x7da2590d7190> name[valid_modes]] begin[:]
<ast.Raise object at 0x7da1b26f12a0>
variable[n_coords_ret] assign[=] call[name[coords].shape][constant[0]]
variable[has_dist] assign[=] call[name[hasattr], parameter[name[coords].distance, constant[kpc]]]
variable[d] assign[=] <ast.IfExp object at 0x7da1b26f0b80>
variable[pix_idx] assign[=] call[name[self]._coords2idx, parameter[name[coords]]]
variable[mask_idx] assign[=] compare[name[pix_idx] equal[==] name[self]._n_pix]
if call[name[np].any, parameter[name[mask_idx]]] begin[:]
call[name[pix_idx]][name[mask_idx]] assign[=] constant[0]
if compare[name[mode] equal[==] constant[random_sample]] begin[:]
variable[samp_idx] assign[=] call[name[np].random.randint, parameter[constant[0], name[self]._n_samples, name[pix_idx].size]]
variable[n_samp_ret] assign[=] constant[1]
if name[has_dist] begin[:]
variable[d] assign[=] name[coords].distance.pc
variable[dist_idx_ceil] assign[=] call[name[np].searchsorted, parameter[name[self]._dists, name[d]]]
if call[name[isinstance], parameter[name[samp_idx], name[slice]]] begin[:]
variable[ret] assign[=] call[name[np].empty, parameter[tuple[[<ast.Name object at 0x7da1b26f04f0>, <ast.Name object at 0x7da1b26f2080>]]]]
variable[idx_near] assign[=] compare[name[dist_idx_ceil] equal[==] constant[0]]
if call[name[np].any, parameter[name[idx_near]]] begin[:]
variable[a] assign[=] binary_operation[call[name[d]][name[idx_near]] / call[name[self]._dists][constant[0]]]
if call[name[isinstance], parameter[name[samp_idx], name[slice]]] begin[:]
call[name[ret]][name[idx_near]] assign[=] binary_operation[call[name[a]][tuple[[<ast.Slice object at 0x7da1b26f03a0>, <ast.Constant object at 0x7da1b26f0370>]]] * call[call[name[self]._data][constant[A0]]][tuple[[<ast.Subscript object at 0x7da1b26f1600>, <ast.Constant object at 0x7da1b26f1750>, <ast.Name object at 0x7da1b26f0070>]]]]
variable[idx_far] assign[=] compare[name[dist_idx_ceil] equal[==] name[self]._n_dists]
if call[name[np].any, parameter[name[idx_far]]] begin[:]
if call[name[isinstance], parameter[name[samp_idx], name[slice]]] begin[:]
call[name[ret]][name[idx_far]] assign[=] call[call[name[self]._data][constant[A0]]][tuple[[<ast.Subscript object at 0x7da1b26f2d10>, <ast.UnaryOp object at 0x7da1b26f28c0>, <ast.Name object at 0x7da1b26f27a0>]]]
variable[idx_btw] assign[=] binary_operation[<ast.UnaryOp object at 0x7da2044c38b0> <ast.BitAnd object at 0x7da2590d6b60> <ast.UnaryOp object at 0x7da2044c23b0>]
if call[name[np].any, parameter[name[idx_btw]]] begin[:]
variable[d_ceil] assign[=] call[name[self]._dists][call[name[dist_idx_ceil]][name[idx_btw]]]
variable[d_floor] assign[=] call[name[self]._dists][binary_operation[call[name[dist_idx_ceil]][name[idx_btw]] - constant[1]]]
variable[a] assign[=] binary_operation[binary_operation[name[d_ceil] - call[name[d]][name[idx_btw]]] / binary_operation[name[d_ceil] - name[d_floor]]]
if call[name[isinstance], parameter[name[samp_idx], name[slice]]] begin[:]
call[name[ret]][name[idx_btw]] assign[=] binary_operation[binary_operation[binary_operation[constant[1.0] - call[name[a]][tuple[[<ast.Slice object at 0x7da2044c3dc0>, <ast.Constant object at 0x7da2044c26e0>]]]] * call[call[name[self]._data][constant[A0]]][tuple[[<ast.Subscript object at 0x7da2044c03d0>, <ast.Subscript object at 0x7da2044c1240>, <ast.Name object at 0x7da2044c2050>]]]] + binary_operation[call[name[a]][tuple[[<ast.Slice object at 0x7da2044c1420>, <ast.Constant object at 0x7da2044c34c0>]]] * call[call[name[self]._data][constant[A0]]][tuple[[<ast.Subscript object at 0x7da2044c3160>, <ast.BinOp object at 0x7da2044c25c0>, <ast.Name object at 0x7da2044c1e70>]]]]]
variable[samp_axis] assign[=] <ast.IfExp object at 0x7da18bc733d0>
if compare[name[mode] equal[==] constant[median]] begin[:]
variable[ret] assign[=] call[name[np].median, parameter[name[ret]]]
if call[name[np].any, parameter[name[mask_idx]]] begin[:]
call[name[ret]][name[mask_idx]] assign[=] name[np].nan
return[name[ret]] | keyword[def] identifier[query] ( identifier[self] , identifier[coords] , identifier[mode] = literal[string] ):
literal[string]
identifier[valid_modes] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ]
keyword[if] identifier[mode] keyword[not] keyword[in] identifier[valid_modes] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[mode] , identifier[valid_modes] ))
identifier[n_coords_ret] = identifier[coords] . identifier[shape] [ literal[int] ]
identifier[has_dist] = identifier[hasattr] ( identifier[coords] . identifier[distance] , literal[string] )
identifier[d] = identifier[coords] . identifier[distance] . identifier[kpc] keyword[if] identifier[has_dist] keyword[else] keyword[None]
identifier[pix_idx] = identifier[self] . identifier[_coords2idx] ( identifier[coords] )
identifier[mask_idx] =( identifier[pix_idx] == identifier[self] . identifier[_n_pix] )
keyword[if] identifier[np] . identifier[any] ( identifier[mask_idx] ):
identifier[pix_idx] [ identifier[mask_idx] ]= literal[int]
keyword[if] identifier[mode] == literal[string] :
identifier[samp_idx] = identifier[np] . identifier[random] . identifier[randint] ( literal[int] , identifier[self] . identifier[_n_samples] , identifier[pix_idx] . identifier[size] )
identifier[n_samp_ret] = literal[int]
keyword[elif] identifier[mode] == literal[string] :
identifier[samp_idx] = identifier[np] . identifier[random] . identifier[randint] ( literal[int] , identifier[self] . identifier[_n_samples] , identifier[self] . identifier[_n_pix] )[ identifier[pix_idx] ]
identifier[n_sample_ret] = literal[int]
keyword[else] :
identifier[samp_idx] = identifier[slice] ( keyword[None] )
identifier[n_samp_ret] = identifier[self] . identifier[_n_samples]
keyword[if] identifier[has_dist] :
identifier[d] = identifier[coords] . identifier[distance] . identifier[pc]
identifier[dist_idx_ceil] = identifier[np] . identifier[searchsorted] ( identifier[self] . identifier[_dists] , identifier[d] )
keyword[if] identifier[isinstance] ( identifier[samp_idx] , identifier[slice] ):
identifier[ret] = identifier[np] . identifier[empty] (( identifier[n_coords_ret] , identifier[n_samp_ret] ), identifier[dtype] = literal[string] )
keyword[else] :
identifier[ret] = identifier[np] . identifier[empty] (( identifier[n_coords_ret] ,), identifier[dtype] = literal[string] )
identifier[idx_near] =( identifier[dist_idx_ceil] == literal[int] )
keyword[if] identifier[np] . identifier[any] ( identifier[idx_near] ):
identifier[a] = identifier[d] [ identifier[idx_near] ]/ identifier[self] . identifier[_dists] [ literal[int] ]
keyword[if] identifier[isinstance] ( identifier[samp_idx] , identifier[slice] ):
identifier[ret] [ identifier[idx_near] ]= identifier[a] [:, keyword[None] ]* identifier[self] . identifier[_data] [ literal[string] ][ identifier[pix_idx] [ identifier[idx_near] ], literal[int] , identifier[samp_idx] ]
keyword[else] :
identifier[ret] [ identifier[idx_near] ]= identifier[a] [:]* identifier[self] . identifier[_data] [ literal[string] ][ identifier[pix_idx] [ identifier[idx_near] ], literal[int] , identifier[samp_idx] [ identifier[idx_near] ]]
identifier[idx_far] =( identifier[dist_idx_ceil] == identifier[self] . identifier[_n_dists] )
keyword[if] identifier[np] . identifier[any] ( identifier[idx_far] ):
keyword[if] identifier[isinstance] ( identifier[samp_idx] , identifier[slice] ):
identifier[ret] [ identifier[idx_far] ]= identifier[self] . identifier[_data] [ literal[string] ][ identifier[pix_idx] [ identifier[idx_far] ],- literal[int] , identifier[samp_idx] ]
keyword[else] :
identifier[ret] [ identifier[idx_far] ]= identifier[self] . identifier[_data] [ literal[string] ][ identifier[pix_idx] [ identifier[idx_far] ],- literal[int] , identifier[samp_idx] [ identifier[idx_far] ]]
identifier[idx_btw] =~ identifier[idx_near] &~ identifier[idx_far]
keyword[if] identifier[np] . identifier[any] ( identifier[idx_btw] ):
identifier[d_ceil] = identifier[self] . identifier[_dists] [ identifier[dist_idx_ceil] [ identifier[idx_btw] ]]
identifier[d_floor] = identifier[self] . identifier[_dists] [ identifier[dist_idx_ceil] [ identifier[idx_btw] ]- literal[int] ]
identifier[a] =( identifier[d_ceil] - identifier[d] [ identifier[idx_btw] ])/( identifier[d_ceil] - identifier[d_floor] )
keyword[if] identifier[isinstance] ( identifier[samp_idx] , identifier[slice] ):
identifier[ret] [ identifier[idx_btw] ]=(
( literal[int] - identifier[a] [:, keyword[None] ])* identifier[self] . identifier[_data] [ literal[string] ][ identifier[pix_idx] [ identifier[idx_btw] ], identifier[dist_idx_ceil] [ identifier[idx_btw] ], identifier[samp_idx] ]
+ identifier[a] [:, keyword[None] ]* identifier[self] . identifier[_data] [ literal[string] ][ identifier[pix_idx] [ identifier[idx_btw] ], identifier[dist_idx_ceil] [ identifier[idx_btw] ]- literal[int] , identifier[samp_idx] ])
keyword[else] :
identifier[ret] [ identifier[idx_btw] ]=(
( literal[int] - identifier[a] [:])* identifier[self] . identifier[_data] [ literal[string] ][ identifier[pix_idx] [ identifier[idx_btw] ], identifier[dist_idx_ceil] [ identifier[idx_btw] ], identifier[samp_idx] [ identifier[idx_btw] ]]
+ identifier[a] [:]* identifier[self] . identifier[_data] [ literal[string] ][ identifier[pix_idx] [ identifier[idx_btw] ], identifier[dist_idx_ceil] [ identifier[idx_btw] ]- literal[int] , identifier[samp_idx] [ identifier[idx_btw] ]])
keyword[else] :
identifier[ret] = identifier[self] . identifier[_data] [ literal[string] ][ identifier[pix_idx] ,:, identifier[samp_idx] ]
identifier[samp_axis] = literal[int] keyword[if] identifier[has_dist] keyword[else] literal[int]
keyword[if] identifier[mode] == literal[string] :
identifier[ret] = identifier[np] . identifier[median] ( identifier[ret] , identifier[axis] = identifier[samp_axis] )
keyword[elif] identifier[mode] == literal[string] :
identifier[ret] = identifier[np] . identifier[mean] ( identifier[ret] , identifier[axis] = identifier[samp_axis] )
keyword[if] identifier[np] . identifier[any] ( identifier[mask_idx] ):
identifier[ret] [ identifier[mask_idx] ]= identifier[np] . identifier[nan]
keyword[return] identifier[ret] | def query(self, coords, mode='random_sample'):
"""
Returns A0 at the given coordinates. There are several different query
modes, which handle the probabilistic nature of the map differently.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
mode (Optional[:obj:`str`]): Five different query modes are available:
``'random_sample'``, ``'random_sample_per_pix'``, ``'samples'``,
``'median'`` and ``'mean'``. The ``mode`` determines how the output
will reflect the probabilistic nature of the IPHAS dust map.
Returns:
Monochromatic extinction, A0, at the specified coordinates, in mags.
The shape of the output depends on the ``mode``, and on whether
``coords`` contains distances.
If ``coords`` does not specify distance(s), then the shape of the
output begins with `coords.shape`. If `coords` does specify
distance(s), then the shape of the output begins with
``coords.shape + ([number of distance bins],)``.
If ``mode`` is ``'random_sample'``, then at each coordinate/distance, a
random sample of reddening is given.
If ``mode`` is ``'random_sample_per_pix'``, then the sample chosen for
each angular pixel of the map will be consistent. For example, if
two query coordinates lie in the same map pixel, then the same
random sample will be chosen from the map for both query
coordinates.
If ``mode`` is ``'median'``, then at each coordinate/distance, the
median reddening is returned.
If ``mode`` is ``'mean'``, then at each coordinate/distance, the mean
reddening is returned.
Finally, if ``mode`` is ``'samples'``, then all at each
coordinate/distance, all samples are returned.
"""
# Check that the query mode is supported
valid_modes = ['random_sample', 'random_sample_per_pix', 'samples', 'median', 'mean']
if mode not in valid_modes:
raise ValueError('"{}" is not a valid `mode`. Valid modes are:\n {}'.format(mode, valid_modes)) # depends on [control=['if'], data=['mode', 'valid_modes']]
n_coords_ret = coords.shape[0]
# Determine if distance has been requested
has_dist = hasattr(coords.distance, 'kpc')
d = coords.distance.kpc if has_dist else None
# Convert coordinates to pixel indices
pix_idx = self._coords2idx(coords)
# Determine which coordinates are out of bounds
mask_idx = pix_idx == self._n_pix
if np.any(mask_idx):
pix_idx[mask_idx] = 0 # depends on [control=['if'], data=[]]
# Which samples to extract
if mode == 'random_sample':
samp_idx = np.random.randint(0, self._n_samples, pix_idx.size)
n_samp_ret = 1 # depends on [control=['if'], data=[]]
elif mode == 'random_sample_per_pix':
samp_idx = np.random.randint(0, self._n_samples, self._n_pix)[pix_idx]
n_sample_ret = 1 # depends on [control=['if'], data=[]]
else:
samp_idx = slice(None)
n_samp_ret = self._n_samples
# Which distances to extract
if has_dist:
d = coords.distance.pc
dist_idx_ceil = np.searchsorted(self._dists, d)
if isinstance(samp_idx, slice):
ret = np.empty((n_coords_ret, n_samp_ret), dtype='f4') # depends on [control=['if'], data=[]]
else:
ret = np.empty((n_coords_ret,), dtype='f4')
# d < d(nearest distance slice)
idx_near = dist_idx_ceil == 0
if np.any(idx_near):
a = d[idx_near] / self._dists[0]
if isinstance(samp_idx, slice):
ret[idx_near] = a[:, None] * self._data['A0'][pix_idx[idx_near], 0, samp_idx] # depends on [control=['if'], data=[]]
else:
ret[idx_near] = a[:] * self._data['A0'][pix_idx[idx_near], 0, samp_idx[idx_near]] # depends on [control=['if'], data=[]]
# d > d(farthest distance slice)
idx_far = dist_idx_ceil == self._n_dists
if np.any(idx_far):
if isinstance(samp_idx, slice):
ret[idx_far] = self._data['A0'][pix_idx[idx_far], -1, samp_idx] # depends on [control=['if'], data=[]]
else:
ret[idx_far] = self._data['A0'][pix_idx[idx_far], -1, samp_idx[idx_far]] # depends on [control=['if'], data=[]]
# d(nearest distance slice) < d < d(farthest distance slice)
idx_btw = ~idx_near & ~idx_far
if np.any(idx_btw):
d_ceil = self._dists[dist_idx_ceil[idx_btw]]
d_floor = self._dists[dist_idx_ceil[idx_btw] - 1]
a = (d_ceil - d[idx_btw]) / (d_ceil - d_floor)
if isinstance(samp_idx, slice):
ret[idx_btw] = (1.0 - a[:, None]) * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw], samp_idx] + a[:, None] * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw] - 1, samp_idx] # depends on [control=['if'], data=[]]
else:
ret[idx_btw] = (1.0 - a[:]) * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw], samp_idx[idx_btw]] + a[:] * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw] - 1, samp_idx[idx_btw]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# TODO: Harmonize order of distances & samples with Bayestar.
ret = self._data['A0'][pix_idx, :, samp_idx]
# Reduce the samples in the requested manner
samp_axis = 1 if has_dist else 2
if mode == 'median':
ret = np.median(ret, axis=samp_axis) # depends on [control=['if'], data=[]]
elif mode == 'mean':
ret = np.mean(ret, axis=samp_axis) # depends on [control=['if'], data=[]]
if np.any(mask_idx):
ret[mask_idx] = np.nan # depends on [control=['if'], data=[]]
return ret |
def group_nodes_by_annotation(graph: BELGraph, annotation: str = 'Subgraph') -> Mapping[str, Set[BaseEntity]]:
    """Group the nodes occurring in edges by the given annotation."""
    grouped = defaultdict(set)

    for source, target, data in graph.edges(data=True):
        if not edge_has_annotation(data, annotation):
            continue
        value = data[ANNOTATIONS][annotation]
        grouped[value].update((source, target))

    return dict(grouped)
constant[Group the nodes occurring in edges by the given annotation.]
variable[result] assign[=] call[name[defaultdict], parameter[name[set]]]
for taget[tuple[[<ast.Name object at 0x7da18f810880>, <ast.Name object at 0x7da18f811420>, <ast.Name object at 0x7da18f8136a0>]]] in starred[call[name[graph].edges, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da18f813f10> begin[:]
continue
call[call[name[result]][call[call[name[d]][name[ANNOTATIONS]]][name[annotation]]].add, parameter[name[u]]]
call[call[name[result]][call[call[name[d]][name[ANNOTATIONS]]][name[annotation]]].add, parameter[name[v]]]
return[call[name[dict], parameter[name[result]]]] | keyword[def] identifier[group_nodes_by_annotation] ( identifier[graph] : identifier[BELGraph] , identifier[annotation] : identifier[str] = literal[string] )-> identifier[Mapping] [ identifier[str] , identifier[Set] [ identifier[BaseEntity] ]]:
literal[string]
identifier[result] = identifier[defaultdict] ( identifier[set] )
keyword[for] identifier[u] , identifier[v] , identifier[d] keyword[in] identifier[graph] . identifier[edges] ( identifier[data] = keyword[True] ):
keyword[if] keyword[not] identifier[edge_has_annotation] ( identifier[d] , identifier[annotation] ):
keyword[continue]
identifier[result] [ identifier[d] [ identifier[ANNOTATIONS] ][ identifier[annotation] ]]. identifier[add] ( identifier[u] )
identifier[result] [ identifier[d] [ identifier[ANNOTATIONS] ][ identifier[annotation] ]]. identifier[add] ( identifier[v] )
keyword[return] identifier[dict] ( identifier[result] ) | def group_nodes_by_annotation(graph: BELGraph, annotation: str='Subgraph') -> Mapping[str, Set[BaseEntity]]:
"""Group the nodes occurring in edges by the given annotation."""
result = defaultdict(set)
for (u, v, d) in graph.edges(data=True):
if not edge_has_annotation(d, annotation):
continue # depends on [control=['if'], data=[]]
result[d[ANNOTATIONS][annotation]].add(u)
result[d[ANNOTATIONS][annotation]].add(v) # depends on [control=['for'], data=[]]
return dict(result) |
def filter(self, table, domains, filter_string):
    """Naive case-insensitive search over domain names."""
    needle = filter_string.lower()

    def matches(domain):
        # Substring match on the lower-cased domain name.
        return needle in domain.name.lower()

    return filter(matches, domains)
constant[Naive case-insensitive search.]
variable[q] assign[=] call[name[filter_string].lower, parameter[]]
def function[comp, parameter[domain]]:
if compare[name[q] in call[name[domain].name.lower, parameter[]]] begin[:]
return[constant[True]]
return[constant[False]]
return[call[name[filter], parameter[name[comp], name[domains]]]] | keyword[def] identifier[filter] ( identifier[self] , identifier[table] , identifier[domains] , identifier[filter_string] ):
literal[string]
identifier[q] = identifier[filter_string] . identifier[lower] ()
keyword[def] identifier[comp] ( identifier[domain] ):
keyword[if] identifier[q] keyword[in] identifier[domain] . identifier[name] . identifier[lower] ():
keyword[return] keyword[True]
keyword[return] keyword[False]
keyword[return] identifier[filter] ( identifier[comp] , identifier[domains] ) | def filter(self, table, domains, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
def comp(domain):
if q in domain.name.lower():
return True # depends on [control=['if'], data=[]]
return False
return filter(comp, domains) |
def _check_enclosing_characters(string, opener, closer):
"""
Makes sure that the enclosing characters for a definition set make sense
1) There is only one set
2) They are in the right order (opening, then closing)
"""
opener_count = string.count(opener)
closer_count = string.count(closer)
total = opener_count + closer_count
if total > 2:
msg = MORE_THAN_ONE_SET.format(opener, closer)
raise ValueError(msg)
elif total == 1:
msg = INCOMPLETE_SET.format(opener, closer)
raise ValueError(msg)
elif opener_count > 1:
msg = INCORRECT_SET_CONSTITUENT.format(opener)
raise ValueError(msg)
elif closer_count > 1:
msg = INCORRECT_SET_CONSTITUENT.format(closer)
raise ValueError(msg) | def function[_check_enclosing_characters, parameter[string, opener, closer]]:
constant[
Makes sure that the enclosing characters for a definition set make sense
1) There is only one set
2) They are in the right order (opening, then closing)
]
variable[opener_count] assign[=] call[name[string].count, parameter[name[opener]]]
variable[closer_count] assign[=] call[name[string].count, parameter[name[closer]]]
variable[total] assign[=] binary_operation[name[opener_count] + name[closer_count]]
if compare[name[total] greater[>] constant[2]] begin[:]
variable[msg] assign[=] call[name[MORE_THAN_ONE_SET].format, parameter[name[opener], name[closer]]]
<ast.Raise object at 0x7da18fe92e60> | keyword[def] identifier[_check_enclosing_characters] ( identifier[string] , identifier[opener] , identifier[closer] ):
literal[string]
identifier[opener_count] = identifier[string] . identifier[count] ( identifier[opener] )
identifier[closer_count] = identifier[string] . identifier[count] ( identifier[closer] )
identifier[total] = identifier[opener_count] + identifier[closer_count]
keyword[if] identifier[total] > literal[int] :
identifier[msg] = identifier[MORE_THAN_ONE_SET] . identifier[format] ( identifier[opener] , identifier[closer] )
keyword[raise] identifier[ValueError] ( identifier[msg] )
keyword[elif] identifier[total] == literal[int] :
identifier[msg] = identifier[INCOMPLETE_SET] . identifier[format] ( identifier[opener] , identifier[closer] )
keyword[raise] identifier[ValueError] ( identifier[msg] )
keyword[elif] identifier[opener_count] > literal[int] :
identifier[msg] = identifier[INCORRECT_SET_CONSTITUENT] . identifier[format] ( identifier[opener] )
keyword[raise] identifier[ValueError] ( identifier[msg] )
keyword[elif] identifier[closer_count] > literal[int] :
identifier[msg] = identifier[INCORRECT_SET_CONSTITUENT] . identifier[format] ( identifier[closer] )
keyword[raise] identifier[ValueError] ( identifier[msg] ) | def _check_enclosing_characters(string, opener, closer):
"""
Makes sure that the enclosing characters for a definition set make sense
1) There is only one set
2) They are in the right order (opening, then closing)
"""
opener_count = string.count(opener)
closer_count = string.count(closer)
total = opener_count + closer_count
if total > 2:
msg = MORE_THAN_ONE_SET.format(opener, closer)
raise ValueError(msg) # depends on [control=['if'], data=[]]
elif total == 1:
msg = INCOMPLETE_SET.format(opener, closer)
raise ValueError(msg) # depends on [control=['if'], data=[]]
elif opener_count > 1:
msg = INCORRECT_SET_CONSTITUENT.format(opener)
raise ValueError(msg) # depends on [control=['if'], data=[]]
elif closer_count > 1:
msg = INCORRECT_SET_CONSTITUENT.format(closer)
raise ValueError(msg) # depends on [control=['if'], data=[]] |
def sscan(self, key, cursor=0, pattern=None, count=None):
    """Incrementally iterate the members of the Set stored at ``key``.

    Like the closely related commands :meth:`~tredis.RedisClient.scan`,
    :meth:`~tredis.RedisClient.hscan` and :meth:`~tredis.RedisClient.zscan`,
    this is a cursor based iterator: every call returns an updated cursor
    that must be supplied as the ``cursor`` argument of the next call.
    An iteration starts with a cursor of ``0`` and is complete once the
    server returns a cursor of ``0`` again.

    For more information, visit the
    `Redis docs on scan <http://redis.io/commands/scan>`_.

    .. note::

        **Time complexity**: ``O(1)`` for every call. ``O(N)`` for a
        complete iteration, including enough command calls for the cursor
        to return back to ``0``. ``N`` is the number of elements inside
        the collection.

    :param key: The key to scan
    :type key: :class:`str`, :class:`bytes`
    :param int cursor: The server specified cursor value or ``0``
    :param pattern: An optional pattern to apply for key matching
    :type pattern: :class:`str`, :class:`bytes`
    :param int count: An optional amount of work to perform in the scan
    :rtype: int, list
    :returns: A tuple containing the cursor and the list of set items
    :raises: :exc:`~tredis.exceptions.RedisError`

    """
    command = [b'SSCAN', key, ascii(cursor).encode('ascii')]
    if pattern:
        command.extend([b'MATCH', pattern])
    if count:
        command.extend([b'COUNT', ascii(count).encode('ascii')])

    def _to_cursor_and_items(value):
        """Convert the raw reply tuple into ``(int(cursor), items)``.

        :param tuple value: The return response from redis
        :rtype: tuple(int, list)

        """
        return int(value[0]), value[1]

    return self._execute(command, format_callback=_to_cursor_and_items)
constant[The :meth:`~tredis.RedisClient.sscan` command and the closely
related commands :meth:`~tredis.RedisClient.scan`,
:meth:`~tredis.RedisClient.hscan` and :meth:`~tredis.RedisClient.zscan`
are used in order to incrementally iterate over a collection of
elements.
- :meth:`~tredis.RedisClient.scan` iterates the set of keys in the
currently selected Redis database.
- :meth:`~tredis.RedisClient.sscan` iterates elements of Sets types.
- :meth:`~tredis.RedisClient.hscan` iterates fields of Hash types and
their associated values.
- :meth:`~tredis.RedisClient.zscan` iterates elements of Sorted Set
types and their associated scores.
**Basic usage**
:meth:`~tredis.RedisClient.sscan` is a cursor based iterator. This
means that at every call of the command, the server returns an updated
cursor that the user needs to use as the cursor argument in the next
call.
An iteration starts when the cursor is set to ``0``, and terminates
when the cursor returned by the server is ``0``.
For more information on :meth:`~tredis.RedisClient.scan`,
visit the `Redis docs on scan <http://redis.io/commands/scan>`_.
.. note::
**Time complexity**: ``O(1)`` for every call. ``O(N)`` for a
complete iteration, including enough command calls for the cursor to
return back to ``0``. ``N`` is the number of elements inside the
collection.
:param key: The key to scan
:type key: :class:`str`, :class:`bytes`
:param int cursor: The server specified cursor value or ``0``
:param pattern: An optional pattern to apply for key matching
:type pattern: :class:`str`, :class:`bytes`
:param int count: An optional amount of work to perform in the scan
:rtype: int, list
:returns: A tuple containing the cursor and the list of set items
:raises: :exc:`~tredis.exceptions.RedisError`
]
def function[format_response, parameter[value]]:
constant[Format the response from redis
:param tuple value: The return response from redis
:rtype: tuple(int, list)
]
return[tuple[[<ast.Call object at 0x7da207f00220>, <ast.Subscript object at 0x7da207f02cb0>]]]
variable[command] assign[=] list[[<ast.Constant object at 0x7da207f02ec0>, <ast.Name object at 0x7da207f03a30>, <ast.Call object at 0x7da207f00880>]]
if name[pattern] begin[:]
<ast.AugAssign object at 0x7da207f02da0>
if name[count] begin[:]
<ast.AugAssign object at 0x7da207f01660>
return[call[name[self]._execute, parameter[name[command]]]] | keyword[def] identifier[sscan] ( identifier[self] , identifier[key] , identifier[cursor] = literal[int] , identifier[pattern] = keyword[None] , identifier[count] = keyword[None] ):
literal[string]
keyword[def] identifier[format_response] ( identifier[value] ):
literal[string]
keyword[return] identifier[int] ( identifier[value] [ literal[int] ]), identifier[value] [ literal[int] ]
identifier[command] =[ literal[string] , identifier[key] , identifier[ascii] ( identifier[cursor] ). identifier[encode] ( literal[string] )]
keyword[if] identifier[pattern] :
identifier[command] +=[ literal[string] , identifier[pattern] ]
keyword[if] identifier[count] :
identifier[command] +=[ literal[string] , identifier[ascii] ( identifier[count] ). identifier[encode] ( literal[string] )]
keyword[return] identifier[self] . identifier[_execute] ( identifier[command] , identifier[format_callback] = identifier[format_response] ) | def sscan(self, key, cursor=0, pattern=None, count=None):
"""The :meth:`~tredis.RedisClient.sscan` command and the closely
related commands :meth:`~tredis.RedisClient.scan`,
:meth:`~tredis.RedisClient.hscan` and :meth:`~tredis.RedisClient.zscan`
are used in order to incrementally iterate over a collection of
elements.
- :meth:`~tredis.RedisClient.scan` iterates the set of keys in the
currently selected Redis database.
- :meth:`~tredis.RedisClient.sscan` iterates elements of Sets types.
- :meth:`~tredis.RedisClient.hscan` iterates fields of Hash types and
their associated values.
- :meth:`~tredis.RedisClient.zscan` iterates elements of Sorted Set
types and their associated scores.
**Basic usage**
:meth:`~tredis.RedisClient.sscan` is a cursor based iterator. This
means that at every call of the command, the server returns an updated
cursor that the user needs to use as the cursor argument in the next
call.
An iteration starts when the cursor is set to ``0``, and terminates
when the cursor returned by the server is ``0``.
For more information on :meth:`~tredis.RedisClient.scan`,
visit the `Redis docs on scan <http://redis.io/commands/scan>`_.
.. note::
**Time complexity**: ``O(1)`` for every call. ``O(N)`` for a
complete iteration, including enough command calls for the cursor to
return back to ``0``. ``N`` is the number of elements inside the
collection.
:param key: The key to scan
:type key: :class:`str`, :class:`bytes`
:param int cursor: The server specified cursor value or ``0``
:param pattern: An optional pattern to apply for key matching
:type pattern: :class:`str`, :class:`bytes`
:param int count: An optional amount of work to perform in the scan
:rtype: int, list
:returns: A tuple containing the cursor and the list of set items
:raises: :exc:`~tredis.exceptions.RedisError`
"""
def format_response(value):
"""Format the response from redis
:param tuple value: The return response from redis
:rtype: tuple(int, list)
"""
return (int(value[0]), value[1])
command = [b'SSCAN', key, ascii(cursor).encode('ascii')]
if pattern:
command += [b'MATCH', pattern] # depends on [control=['if'], data=[]]
if count:
command += [b'COUNT', ascii(count).encode('ascii')] # depends on [control=['if'], data=[]]
return self._execute(command, format_callback=format_response) |
def _check_valid_version():
    '''
    Check the version of Bower to ensure this module will work. Currently
    bower must be at least version 1.3.
    '''
    # pylint: disable=no-member
    installed = _LooseVersion(__salt__['cmd.run']('bower --version'))
    minimum = _LooseVersion('1.3')
    # pylint: enable=no-member
    if installed >= minimum:
        return
    raise CommandExecutionError(
        '\'bower\' is not recent enough({0} < {1}). '
        'Please Upgrade.'.format(installed, minimum))
constant[
Check the version of Bower to ensure this module will work. Currently
bower must be at least version 1.3.
]
variable[bower_version] assign[=] call[name[_LooseVersion], parameter[call[call[name[__salt__]][constant[cmd.run]], parameter[constant[bower --version]]]]]
variable[valid_version] assign[=] call[name[_LooseVersion], parameter[constant[1.3]]]
if compare[name[bower_version] less[<] name[valid_version]] begin[:]
<ast.Raise object at 0x7da1b1f35690> | keyword[def] identifier[_check_valid_version] ():
literal[string]
identifier[bower_version] = identifier[_LooseVersion] (
identifier[__salt__] [ literal[string] ]( literal[string] ))
identifier[valid_version] = identifier[_LooseVersion] ( literal[string] )
keyword[if] identifier[bower_version] < identifier[valid_version] :
keyword[raise] identifier[CommandExecutionError] (
literal[string]
literal[string] . identifier[format] (
identifier[bower_version] , identifier[valid_version]
)
) | def _check_valid_version():
"""
Check the version of Bower to ensure this module will work. Currently
bower must be at least version 1.3.
"""
# pylint: disable=no-member
bower_version = _LooseVersion(__salt__['cmd.run']('bower --version'))
valid_version = _LooseVersion('1.3')
# pylint: enable=no-member
if bower_version < valid_version:
raise CommandExecutionError("'bower' is not recent enough({0} < {1}). Please Upgrade.".format(bower_version, valid_version)) # depends on [control=['if'], data=['bower_version', 'valid_version']] |
def get_lines(data_nts, prtfmt=None, nt_fields=None, **kws):
    """Format a sequence of namedtuples as table lines using *prtfmt*.

    Optional keyword args: ``joinchr``/``eol`` (used only when *prtfmt*
    is built automatically from the first namedtuple), ``sort_by`` (key
    function applied before formatting) and ``prt_if`` (predicate that
    selects which rows are emitted).
    """
    # Build a default format string from the first namedtuple's fields.
    if prtfmt is None:
        prtfmt = mk_fmtfld(data_nts[0], kws.get('joinchr', ' '), kws.get('eol', '\n'))
    # If caller named the fields explicitly, verify them against prtfmt.
    if nt_fields is not None:
        _chk_flds_fmt(nt_fields, prtfmt)
    if 'sort_by' in kws:
        data_nts = sorted(data_nts, key=kws['sort_by'])
    prt_if = kws.get('prt_if')
    return [prtfmt.format(**nt._asdict())
            for nt in data_nts
            if prt_if is None or prt_if(nt)]
constant[Print list of namedtuples into a table using prtfmt.]
variable[lines] assign[=] list[[]]
if compare[name[prtfmt] is constant[None]] begin[:]
variable[prtfmt] assign[=] call[name[mk_fmtfld], parameter[call[name[data_nts]][constant[0]], call[name[kws].get, parameter[constant[joinchr], constant[ ]]], call[name[kws].get, parameter[constant[eol], constant[
]]]]]
if compare[name[nt_fields] is_not constant[None]] begin[:]
call[name[_chk_flds_fmt], parameter[name[nt_fields], name[prtfmt]]]
if compare[constant[sort_by] in name[kws]] begin[:]
variable[data_nts] assign[=] call[name[sorted], parameter[name[data_nts]]]
variable[prt_if] assign[=] call[name[kws].get, parameter[constant[prt_if], constant[None]]]
for taget[name[data_nt]] in starred[name[data_nts]] begin[:]
if <ast.BoolOp object at 0x7da20c6abeb0> begin[:]
call[name[lines].append, parameter[call[name[prtfmt].format, parameter[]]]]
return[name[lines]] | keyword[def] identifier[get_lines] ( identifier[data_nts] , identifier[prtfmt] = keyword[None] , identifier[nt_fields] = keyword[None] ,** identifier[kws] ):
literal[string]
identifier[lines] =[]
keyword[if] identifier[prtfmt] keyword[is] keyword[None] :
identifier[prtfmt] = identifier[mk_fmtfld] ( identifier[data_nts] [ literal[int] ], identifier[kws] . identifier[get] ( literal[string] , literal[string] ), identifier[kws] . identifier[get] ( literal[string] , literal[string] ))
keyword[if] identifier[nt_fields] keyword[is] keyword[not] keyword[None] :
identifier[_chk_flds_fmt] ( identifier[nt_fields] , identifier[prtfmt] )
keyword[if] literal[string] keyword[in] identifier[kws] :
identifier[data_nts] = identifier[sorted] ( identifier[data_nts] , identifier[key] = identifier[kws] [ literal[string] ])
identifier[prt_if] = identifier[kws] . identifier[get] ( literal[string] , keyword[None] )
keyword[for] identifier[data_nt] keyword[in] identifier[data_nts] :
keyword[if] identifier[prt_if] keyword[is] keyword[None] keyword[or] identifier[prt_if] ( identifier[data_nt] ):
identifier[lines] . identifier[append] ( identifier[prtfmt] . identifier[format] (** identifier[data_nt] . identifier[_asdict] ()))
keyword[return] identifier[lines] | def get_lines(data_nts, prtfmt=None, nt_fields=None, **kws):
"""Print list of namedtuples into a table using prtfmt."""
lines = []
# optional keyword args: prt_if sort_by
if prtfmt is None:
prtfmt = mk_fmtfld(data_nts[0], kws.get('joinchr', ' '), kws.get('eol', '\n')) # depends on [control=['if'], data=['prtfmt']]
# if nt_fields arg is None, use fields from prtfmt string.
if nt_fields is not None:
_chk_flds_fmt(nt_fields, prtfmt) # depends on [control=['if'], data=['nt_fields']]
if 'sort_by' in kws:
data_nts = sorted(data_nts, key=kws['sort_by']) # depends on [control=['if'], data=['kws']]
prt_if = kws.get('prt_if', None)
for data_nt in data_nts:
if prt_if is None or prt_if(data_nt):
lines.append(prtfmt.format(**data_nt._asdict())) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['data_nt']]
return lines |
def _parse_vagrant_sandbox_status(self, vagrant_output):
'''
Returns the status of the sandbox mode given output from
'vagrant sandbox status'.
'''
# typical output
# [default] - snapshot mode is off
# or
# [default] - machine not created
# if the box VM is down
tokens = [token.strip() for token in vagrant_output.split(' ')]
if tokens[0] == 'Usage:':
sahara_status = 'not installed'
elif "{} {}".format(tokens[-2], tokens[-1]) == 'not created':
sahara_status = 'unknown'
else:
sahara_status = tokens[-1]
return sahara_status | def function[_parse_vagrant_sandbox_status, parameter[self, vagrant_output]]:
constant[
Returns the status of the sandbox mode given output from
'vagrant sandbox status'.
]
variable[tokens] assign[=] <ast.ListComp object at 0x7da1b1a7df90>
if compare[call[name[tokens]][constant[0]] equal[==] constant[Usage:]] begin[:]
variable[sahara_status] assign[=] constant[not installed]
return[name[sahara_status]] | keyword[def] identifier[_parse_vagrant_sandbox_status] ( identifier[self] , identifier[vagrant_output] ):
literal[string]
identifier[tokens] =[ identifier[token] . identifier[strip] () keyword[for] identifier[token] keyword[in] identifier[vagrant_output] . identifier[split] ( literal[string] )]
keyword[if] identifier[tokens] [ literal[int] ]== literal[string] :
identifier[sahara_status] = literal[string]
keyword[elif] literal[string] . identifier[format] ( identifier[tokens] [- literal[int] ], identifier[tokens] [- literal[int] ])== literal[string] :
identifier[sahara_status] = literal[string]
keyword[else] :
identifier[sahara_status] = identifier[tokens] [- literal[int] ]
keyword[return] identifier[sahara_status] | def _parse_vagrant_sandbox_status(self, vagrant_output):
"""
Returns the status of the sandbox mode given output from
'vagrant sandbox status'.
"""
# typical output
# [default] - snapshot mode is off
# or
# [default] - machine not created
# if the box VM is down
tokens = [token.strip() for token in vagrant_output.split(' ')]
if tokens[0] == 'Usage:':
sahara_status = 'not installed' # depends on [control=['if'], data=[]]
elif '{} {}'.format(tokens[-2], tokens[-1]) == 'not created':
sahara_status = 'unknown' # depends on [control=['if'], data=[]]
else:
sahara_status = tokens[-1]
return sahara_status |
def _parse_deaths(self, rows):
    """
    Parses the character's recent deaths.

    Each successfully parsed row is appended to ``self.deaths`` as a
    ``Death`` populated with its killers and assists.

    Parameters
    ----------
    rows: :class:`list` of :class:`bs4.Tag`
        A list of all rows contained in the table.
    """
    for row in rows:
        cols = row.find_all('td')
        # First column: timestamp.  "\xa0" is the non-breaking space the
        # page uses; normalize it before parsing the datetime.
        death_time_str = cols[0].text.replace("\xa0", " ").strip()
        death_time = parse_tibia_datetime(death_time_str)
        # Second column: the death description, kept as raw HTML so the
        # regexes below can see the character links inside it.
        death = str(cols[1]).replace("\xa0", " ")
        death_info = death_regexp.search(death)
        if death_info:
            level = int(death_info.group("level"))
            killers_desc = death_info.group("killers")
        else:
            # Row does not match the expected death format; skip it.
            continue
        # Rebind ``death`` from the raw HTML string to the Death object.
        death = Death(self.name, level, time=death_time)
        assists_name_list = []
        # Check if the killers list contains assists
        assist_match = death_assisted.search(killers_desc)
        if assist_match:
            # Filter out assists
            killers_desc = assist_match.group("killers")
            # Split assists into a list.
            assists_name_list = self._split_list(assist_match.group("assists"))
        killers_name_list = self._split_list(killers_desc)
        for killer in killers_name_list:
            killer_dict = self._parse_killer(killer)
            death.killers.append(Killer(**killer_dict))
        for assist in assists_name_list:
            # Extract names from character links in assists list.
            # NOTE(review): assumes every assist entry contains a link;
            # a non-matching entry would raise AttributeError here.
            assist_dict = {"name": link_content.search(assist).group(1), "player": True}
            death.assists.append(Killer(**assist_dict))
        try:
            self.deaths.append(death)
        except ValueError:
            # Some pvp deaths have no level, so they are raising a ValueError, they will be ignored for now.
            continue
constant[
Parses the character's recent deaths
Parameters
----------
rows: :class:`list` of :class:`bs4.Tag`
A list of all rows contained in the table.
]
for taget[name[row]] in starred[name[rows]] begin[:]
variable[cols] assign[=] call[name[row].find_all, parameter[constant[td]]]
variable[death_time_str] assign[=] call[call[call[name[cols]][constant[0]].text.replace, parameter[constant[ ], constant[ ]]].strip, parameter[]]
variable[death_time] assign[=] call[name[parse_tibia_datetime], parameter[name[death_time_str]]]
variable[death] assign[=] call[call[name[str], parameter[call[name[cols]][constant[1]]]].replace, parameter[constant[ ], constant[ ]]]
variable[death_info] assign[=] call[name[death_regexp].search, parameter[name[death]]]
if name[death_info] begin[:]
variable[level] assign[=] call[name[int], parameter[call[name[death_info].group, parameter[constant[level]]]]]
variable[killers_desc] assign[=] call[name[death_info].group, parameter[constant[killers]]]
variable[death] assign[=] call[name[Death], parameter[name[self].name, name[level]]]
variable[assists_name_list] assign[=] list[[]]
variable[assist_match] assign[=] call[name[death_assisted].search, parameter[name[killers_desc]]]
if name[assist_match] begin[:]
variable[killers_desc] assign[=] call[name[assist_match].group, parameter[constant[killers]]]
variable[assists_name_list] assign[=] call[name[self]._split_list, parameter[call[name[assist_match].group, parameter[constant[assists]]]]]
variable[killers_name_list] assign[=] call[name[self]._split_list, parameter[name[killers_desc]]]
for taget[name[killer]] in starred[name[killers_name_list]] begin[:]
variable[killer_dict] assign[=] call[name[self]._parse_killer, parameter[name[killer]]]
call[name[death].killers.append, parameter[call[name[Killer], parameter[]]]]
for taget[name[assist]] in starred[name[assists_name_list]] begin[:]
variable[assist_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b6e440>, <ast.Constant object at 0x7da1b0b6ca60>], [<ast.Call object at 0x7da1b0b6c910>, <ast.Constant object at 0x7da1b0b6ca30>]]
call[name[death].assists.append, parameter[call[name[Killer], parameter[]]]]
<ast.Try object at 0x7da1b0b6c2b0> | keyword[def] identifier[_parse_deaths] ( identifier[self] , identifier[rows] ):
literal[string]
keyword[for] identifier[row] keyword[in] identifier[rows] :
identifier[cols] = identifier[row] . identifier[find_all] ( literal[string] )
identifier[death_time_str] = identifier[cols] [ literal[int] ]. identifier[text] . identifier[replace] ( literal[string] , literal[string] ). identifier[strip] ()
identifier[death_time] = identifier[parse_tibia_datetime] ( identifier[death_time_str] )
identifier[death] = identifier[str] ( identifier[cols] [ literal[int] ]). identifier[replace] ( literal[string] , literal[string] )
identifier[death_info] = identifier[death_regexp] . identifier[search] ( identifier[death] )
keyword[if] identifier[death_info] :
identifier[level] = identifier[int] ( identifier[death_info] . identifier[group] ( literal[string] ))
identifier[killers_desc] = identifier[death_info] . identifier[group] ( literal[string] )
keyword[else] :
keyword[continue]
identifier[death] = identifier[Death] ( identifier[self] . identifier[name] , identifier[level] , identifier[time] = identifier[death_time] )
identifier[assists_name_list] =[]
identifier[assist_match] = identifier[death_assisted] . identifier[search] ( identifier[killers_desc] )
keyword[if] identifier[assist_match] :
identifier[killers_desc] = identifier[assist_match] . identifier[group] ( literal[string] )
identifier[assists_name_list] = identifier[self] . identifier[_split_list] ( identifier[assist_match] . identifier[group] ( literal[string] ))
identifier[killers_name_list] = identifier[self] . identifier[_split_list] ( identifier[killers_desc] )
keyword[for] identifier[killer] keyword[in] identifier[killers_name_list] :
identifier[killer_dict] = identifier[self] . identifier[_parse_killer] ( identifier[killer] )
identifier[death] . identifier[killers] . identifier[append] ( identifier[Killer] (** identifier[killer_dict] ))
keyword[for] identifier[assist] keyword[in] identifier[assists_name_list] :
identifier[assist_dict] ={ literal[string] : identifier[link_content] . identifier[search] ( identifier[assist] ). identifier[group] ( literal[int] ), literal[string] : keyword[True] }
identifier[death] . identifier[assists] . identifier[append] ( identifier[Killer] (** identifier[assist_dict] ))
keyword[try] :
identifier[self] . identifier[deaths] . identifier[append] ( identifier[death] )
keyword[except] identifier[ValueError] :
keyword[continue] | def _parse_deaths(self, rows):
"""
Parses the character's recent deaths
Parameters
----------
rows: :class:`list` of :class:`bs4.Tag`
A list of all rows contained in the table.
"""
for row in rows:
cols = row.find_all('td')
death_time_str = cols[0].text.replace('\xa0', ' ').strip()
death_time = parse_tibia_datetime(death_time_str)
death = str(cols[1]).replace('\xa0', ' ')
death_info = death_regexp.search(death)
if death_info:
level = int(death_info.group('level'))
killers_desc = death_info.group('killers') # depends on [control=['if'], data=[]]
else:
continue
death = Death(self.name, level, time=death_time)
assists_name_list = []
# Check if the killers list contains assists
assist_match = death_assisted.search(killers_desc)
if assist_match:
# Filter out assists
killers_desc = assist_match.group('killers')
# Split assists into a list.
assists_name_list = self._split_list(assist_match.group('assists')) # depends on [control=['if'], data=[]]
killers_name_list = self._split_list(killers_desc)
for killer in killers_name_list:
killer_dict = self._parse_killer(killer)
death.killers.append(Killer(**killer_dict)) # depends on [control=['for'], data=['killer']]
for assist in assists_name_list:
# Extract names from character links in assists list.
assist_dict = {'name': link_content.search(assist).group(1), 'player': True}
death.assists.append(Killer(**assist_dict)) # depends on [control=['for'], data=['assist']]
try:
self.deaths.append(death) # depends on [control=['try'], data=[]]
except ValueError:
# Some pvp deaths have no level, so they are raising a ValueError, they will be ignored for now.
continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['row']] |
def create(self, body=values.unset, priority=values.unset, ttl=values.unset,
           title=values.unset, sound=values.unset, action=values.unset,
           data=values.unset, apn=values.unset, gcm=values.unset,
           sms=values.unset, facebook_messenger=values.unset, fcm=values.unset,
           segment=values.unset, alexa=values.unset, to_binding=values.unset,
           identity=values.unset, tag=values.unset):
    """
    Create a new NotificationInstance.

    :param unicode body: The notification body text
    :param NotificationInstance.Priority priority: The priority of the notification
    :param unicode ttl: How long, in seconds, the notification is valid
    :param unicode title: The notification title
    :param unicode sound: The name of the sound to be played for the notification
    :param unicode action: The actions to display for the notification
    :param dict data: The custom key-value pairs of the notification's payload
    :param dict apn: APNS-specific payload that overrides the generic payload for APNS Bindings
    :param dict gcm: GCM-specific payload that overrides the generic payload for GCM Bindings
    :param dict sms: SMS-specific payload that overrides the generic payload for SMS Bindings
    :param dict facebook_messenger: Deprecated
    :param dict fcm: FCM-specific payload that overrides the generic payload for FCM Bindings
    :param unicode segment: A Segment to notify
    :param dict alexa: Deprecated
    :param unicode to_binding: The destination address specified as a JSON string
    :param unicode identity: The `identity` value that identifies the new resource's User
    :param unicode tag: A tag that selects the Bindings to notify
    :returns: Newly created NotificationInstance
    :rtype: twilio.rest.notify.v1.service.notification.NotificationInstance
    """
    def _echo(element):
        # Identity element mapper handed to serialize.map.
        return element

    # Serialized request parameters are kept in ``params`` so the
    # ``data`` argument (the notification payload) is not shadowed.
    params = values.of({
        'Identity': serialize.map(identity, _echo),
        'Tag': serialize.map(tag, _echo),
        'Body': body,
        'Priority': priority,
        'Ttl': ttl,
        'Title': title,
        'Sound': sound,
        'Action': action,
        'Data': serialize.object(data),
        'Apn': serialize.object(apn),
        'Gcm': serialize.object(gcm),
        'Sms': serialize.object(sms),
        'FacebookMessenger': serialize.object(facebook_messenger),
        'Fcm': serialize.object(fcm),
        'Segment': serialize.map(segment, _echo),
        'Alexa': serialize.object(alexa),
        'ToBinding': serialize.map(to_binding, _echo),
    })

    payload = self._version.create('POST', self._uri, data=params)

    return NotificationInstance(self._version, payload, service_sid=self._solution['service_sid'])
constant[
Create a new NotificationInstance
:param unicode body: The notification body text
:param NotificationInstance.Priority priority: The priority of the notification
:param unicode ttl: How long, in seconds, the notification is valid
:param unicode title: The notification title
:param unicode sound: The name of the sound to be played for the notification
:param unicode action: The actions to display for the notification
:param dict data: The custom key-value pairs of the notification's payload
:param dict apn: The APNS-specific payload that overrides corresponding attributes in a generic payload for APNS Bindings
:param dict gcm: The GCM-specific payload that overrides corresponding attributes in generic payload for GCM Bindings
:param dict sms: The SMS-specific payload that overrides corresponding attributes in generic payload for SMS Bindings
:param dict facebook_messenger: Deprecated
:param dict fcm: The FCM-specific payload that overrides corresponding attributes in generic payload for FCM Bindings
:param unicode segment: A Segment to notify
:param dict alexa: Deprecated
:param unicode to_binding: The destination address specified as a JSON string
:param unicode identity: The `identity` value that identifies the new resource's User
:param unicode tag: A tag that selects the Bindings to notify
:returns: Newly created NotificationInstance
:rtype: twilio.rest.notify.v1.service.notification.NotificationInstance
]
variable[data] assign[=] call[name[values].of, parameter[dictionary[[<ast.Constant object at 0x7da1b1e02f80>, <ast.Constant object at 0x7da1b1e00760>, <ast.Constant object at 0x7da1b1e019c0>, <ast.Constant object at 0x7da1b1e00850>, <ast.Constant object at 0x7da1b1e03790>, <ast.Constant object at 0x7da1b1e00d60>, <ast.Constant object at 0x7da1b1e00eb0>, <ast.Constant object at 0x7da1b1e01360>, <ast.Constant object at 0x7da1b1e00070>, <ast.Constant object at 0x7da1b1e02620>, <ast.Constant object at 0x7da1b2347550>, <ast.Constant object at 0x7da1b2345030>, <ast.Constant object at 0x7da1b23456c0>, <ast.Constant object at 0x7da1b23472e0>, <ast.Constant object at 0x7da1b23469b0>, <ast.Constant object at 0x7da1b2345000>, <ast.Constant object at 0x7da1b2347940>], [<ast.Call object at 0x7da1b23463e0>, <ast.Call object at 0x7da1b2345e40>, <ast.Name object at 0x7da1b1eec190>, <ast.Name object at 0x7da1b1eed480>, <ast.Name object at 0x7da1b1eefa60>, <ast.Name object at 0x7da1b1eef130>, <ast.Name object at 0x7da1b1eeeef0>, <ast.Name object at 0x7da1b1eedab0>, <ast.Call object at 0x7da1b1eef2b0>, <ast.Call object at 0x7da1b1e03010>, <ast.Call object at 0x7da1b1e02770>, <ast.Call object at 0x7da1b1e01120>, <ast.Call object at 0x7da1b1e00c70>, <ast.Call object at 0x7da1b1e02b60>, <ast.Call object at 0x7da1b1e00d00>, <ast.Call object at 0x7da1b1e00490>, <ast.Call object at 0x7da1b1e003a0>]]]]
variable[payload] assign[=] call[name[self]._version.create, parameter[constant[POST], name[self]._uri]]
return[call[name[NotificationInstance], parameter[name[self]._version, name[payload]]]] | keyword[def] identifier[create] ( identifier[self] , identifier[body] = identifier[values] . identifier[unset] , identifier[priority] = identifier[values] . identifier[unset] , identifier[ttl] = identifier[values] . identifier[unset] ,
identifier[title] = identifier[values] . identifier[unset] , identifier[sound] = identifier[values] . identifier[unset] , identifier[action] = identifier[values] . identifier[unset] ,
identifier[data] = identifier[values] . identifier[unset] , identifier[apn] = identifier[values] . identifier[unset] , identifier[gcm] = identifier[values] . identifier[unset] ,
identifier[sms] = identifier[values] . identifier[unset] , identifier[facebook_messenger] = identifier[values] . identifier[unset] , identifier[fcm] = identifier[values] . identifier[unset] ,
identifier[segment] = identifier[values] . identifier[unset] , identifier[alexa] = identifier[values] . identifier[unset] , identifier[to_binding] = identifier[values] . identifier[unset] ,
identifier[identity] = identifier[values] . identifier[unset] , identifier[tag] = identifier[values] . identifier[unset] ):
literal[string]
identifier[data] = identifier[values] . identifier[of] ({
literal[string] : identifier[serialize] . identifier[map] ( identifier[identity] , keyword[lambda] identifier[e] : identifier[e] ),
literal[string] : identifier[serialize] . identifier[map] ( identifier[tag] , keyword[lambda] identifier[e] : identifier[e] ),
literal[string] : identifier[body] ,
literal[string] : identifier[priority] ,
literal[string] : identifier[ttl] ,
literal[string] : identifier[title] ,
literal[string] : identifier[sound] ,
literal[string] : identifier[action] ,
literal[string] : identifier[serialize] . identifier[object] ( identifier[data] ),
literal[string] : identifier[serialize] . identifier[object] ( identifier[apn] ),
literal[string] : identifier[serialize] . identifier[object] ( identifier[gcm] ),
literal[string] : identifier[serialize] . identifier[object] ( identifier[sms] ),
literal[string] : identifier[serialize] . identifier[object] ( identifier[facebook_messenger] ),
literal[string] : identifier[serialize] . identifier[object] ( identifier[fcm] ),
literal[string] : identifier[serialize] . identifier[map] ( identifier[segment] , keyword[lambda] identifier[e] : identifier[e] ),
literal[string] : identifier[serialize] . identifier[object] ( identifier[alexa] ),
literal[string] : identifier[serialize] . identifier[map] ( identifier[to_binding] , keyword[lambda] identifier[e] : identifier[e] ),
})
identifier[payload] = identifier[self] . identifier[_version] . identifier[create] (
literal[string] ,
identifier[self] . identifier[_uri] ,
identifier[data] = identifier[data] ,
)
keyword[return] identifier[NotificationInstance] ( identifier[self] . identifier[_version] , identifier[payload] , identifier[service_sid] = identifier[self] . identifier[_solution] [ literal[string] ],) | def create(self, body=values.unset, priority=values.unset, ttl=values.unset, title=values.unset, sound=values.unset, action=values.unset, data=values.unset, apn=values.unset, gcm=values.unset, sms=values.unset, facebook_messenger=values.unset, fcm=values.unset, segment=values.unset, alexa=values.unset, to_binding=values.unset, identity=values.unset, tag=values.unset):
"""
Create a new NotificationInstance
:param unicode body: The notification body text
:param NotificationInstance.Priority priority: The priority of the notification
:param unicode ttl: How long, in seconds, the notification is valid
:param unicode title: The notification title
:param unicode sound: The name of the sound to be played for the notification
:param unicode action: The actions to display for the notification
:param dict data: The custom key-value pairs of the notification's payload
:param dict apn: The APNS-specific payload that overrides corresponding attributes in a generic payload for APNS Bindings
:param dict gcm: The GCM-specific payload that overrides corresponding attributes in generic payload for GCM Bindings
:param dict sms: The SMS-specific payload that overrides corresponding attributes in generic payload for SMS Bindings
:param dict facebook_messenger: Deprecated
:param dict fcm: The FCM-specific payload that overrides corresponding attributes in generic payload for FCM Bindings
:param unicode segment: A Segment to notify
:param dict alexa: Deprecated
:param unicode to_binding: The destination address specified as a JSON string
:param unicode identity: The `identity` value that identifies the new resource's User
:param unicode tag: A tag that selects the Bindings to notify
:returns: Newly created NotificationInstance
:rtype: twilio.rest.notify.v1.service.notification.NotificationInstance
"""
data = values.of({'Identity': serialize.map(identity, lambda e: e), 'Tag': serialize.map(tag, lambda e: e), 'Body': body, 'Priority': priority, 'Ttl': ttl, 'Title': title, 'Sound': sound, 'Action': action, 'Data': serialize.object(data), 'Apn': serialize.object(apn), 'Gcm': serialize.object(gcm), 'Sms': serialize.object(sms), 'FacebookMessenger': serialize.object(facebook_messenger), 'Fcm': serialize.object(fcm), 'Segment': serialize.map(segment, lambda e: e), 'Alexa': serialize.object(alexa), 'ToBinding': serialize.map(to_binding, lambda e: e)})
payload = self._version.create('POST', self._uri, data=data)
return NotificationInstance(self._version, payload, service_sid=self._solution['service_sid']) |
def add_process(self, command=None, vsplit=False, start_directory=None):
    """
    Add a new process to the current window by splitting it.

    A pane is created (optionally running `command` from
    `start_directory`), inserted with a vertical or horizontal split,
    given focus, and the display is invalidated for redraw.
    """
    assert command is None or isinstance(command, six.text_type)
    assert start_directory is None or isinstance(start_directory, six.text_type)

    active_window = self.arrangement.get_active_window()
    new_pane = self._create_pane(active_window, command,
                                 start_directory=start_directory)
    active_window.add_pane(new_pane, vsplit=vsplit)
    new_pane.focus()
    self.invalidate()
constant[
Add a new process to the current window. (vsplit/hsplit).
]
assert[<ast.BoolOp object at 0x7da20e9603a0>]
assert[<ast.BoolOp object at 0x7da20c76d5a0>]
variable[window] assign[=] call[name[self].arrangement.get_active_window, parameter[]]
variable[pane] assign[=] call[name[self]._create_pane, parameter[name[window], name[command]]]
call[name[window].add_pane, parameter[name[pane]]]
call[name[pane].focus, parameter[]]
call[name[self].invalidate, parameter[]] | keyword[def] identifier[add_process] ( identifier[self] , identifier[command] = keyword[None] , identifier[vsplit] = keyword[False] , identifier[start_directory] = keyword[None] ):
literal[string]
keyword[assert] identifier[command] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[command] , identifier[six] . identifier[text_type] )
keyword[assert] identifier[start_directory] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[start_directory] , identifier[six] . identifier[text_type] )
identifier[window] = identifier[self] . identifier[arrangement] . identifier[get_active_window] ()
identifier[pane] = identifier[self] . identifier[_create_pane] ( identifier[window] , identifier[command] , identifier[start_directory] = identifier[start_directory] )
identifier[window] . identifier[add_pane] ( identifier[pane] , identifier[vsplit] = identifier[vsplit] )
identifier[pane] . identifier[focus] ()
identifier[self] . identifier[invalidate] () | def add_process(self, command=None, vsplit=False, start_directory=None):
"""
Add a new process to the current window. (vsplit/hsplit).
"""
assert command is None or isinstance(command, six.text_type)
assert start_directory is None or isinstance(start_directory, six.text_type)
window = self.arrangement.get_active_window()
pane = self._create_pane(window, command, start_directory=start_directory)
window.add_pane(pane, vsplit=vsplit)
pane.focus()
self.invalidate() |
def lnprior(x):
    """Return the log prior given parameter vector `x`.

    A flat (uniform) box prior: returns 0 when all three parameters
    fall inside their allowed ranges (7 <= per <= 10,
    1978 <= t0 <= 1979, -1 <= b <= 1) and -inf otherwise.
    """
    per, t0, b = x
    # Check each parameter against its bounds; any violation rejects.
    bounds = ((b, -1, 1), (per, 7, 10), (t0, 1978, 1979))
    for value, lo, hi in bounds:
        if value < lo or value > hi:
            return -np.inf
    return 0.
constant[Return the log prior given parameter vector `x`.]
<ast.Tuple object at 0x7da1b0e0d3f0> assign[=] name[x]
if <ast.BoolOp object at 0x7da1b0e0ec20> begin[:]
return[<ast.UnaryOp object at 0x7da1b0e0dc60>] | keyword[def] identifier[lnprior] ( identifier[x] ):
literal[string]
identifier[per] , identifier[t0] , identifier[b] = identifier[x]
keyword[if] identifier[b] <- literal[int] keyword[or] identifier[b] > literal[int] :
keyword[return] - identifier[np] . identifier[inf]
keyword[elif] identifier[per] < literal[int] keyword[or] identifier[per] > literal[int] :
keyword[return] - identifier[np] . identifier[inf]
keyword[elif] identifier[t0] < literal[int] keyword[or] identifier[t0] > literal[int] :
keyword[return] - identifier[np] . identifier[inf]
keyword[else] :
keyword[return] literal[int] | def lnprior(x):
"""Return the log prior given parameter vector `x`."""
(per, t0, b) = x
if b < -1 or b > 1:
return -np.inf # depends on [control=['if'], data=[]]
elif per < 7 or per > 10:
return -np.inf # depends on [control=['if'], data=[]]
elif t0 < 1978 or t0 > 1979:
return -np.inf # depends on [control=['if'], data=[]]
else:
return 0.0 |
def methods(*meth):
    """
    Explicitly set the HTTP methods to use without using @route.

    This can only be applied to methods, not classes.

    :param meth: method names to allow, case-insensitive
        (e.g. ``'get'``, ``'post'``)
    :return: a decorator that stores the upper-cased method names on the
        wrapped function as ``_methods_cache`` and returns the function
        unchanged
    """
    def decorator(f):
        # Record the methods only once: a function that was already
        # decorated keeps its original cache.
        if not hasattr(f, '_methods_cache'):
            f._methods_cache = [m.upper() for m in meth]
        return f
    return decorator
constant[
To explicitely set the methods to use without using @route
This can only be applied of methods. Not class.
:param meth: tuple of available method
:return:
]
def function[decorator, parameter[f]]:
if <ast.UnaryOp object at 0x7da1b209e920> begin[:]
name[f]._methods_cache assign[=] <ast.ListComp object at 0x7da1b209c9a0>
return[name[f]]
return[name[decorator]] | keyword[def] identifier[methods] (* identifier[meth] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[f] ):
keyword[if] keyword[not] identifier[hasattr] ( identifier[f] , literal[string] ):
identifier[f] . identifier[_methods_cache] =[ identifier[m] . identifier[upper] () keyword[for] identifier[m] keyword[in] identifier[meth] ]
keyword[return] identifier[f]
keyword[return] identifier[decorator] | def methods(*meth):
"""
To explicitely set the methods to use without using @route
This can only be applied of methods. Not class.
:param meth: tuple of available method
:return:
"""
def decorator(f):
if not hasattr(f, '_methods_cache'):
f._methods_cache = [m.upper() for m in meth] # depends on [control=['if'], data=[]]
return f
return decorator |
def load_config_module():
    """
    Import the config.py file as a module and return it.

    If the config file cannot be read, log the failure and call
    sys.exit() with a request to run oaepub configure.
    """
    import imp

    config_path = config_location()
    try:
        module = imp.load_source('config', config_path)
    except IOError:
        log.critical('Config file not found. oaepub exiting...')
        sys.exit('Config file not found. Please run \'oaepub configure\'')
    # Only reached on a successful import (sys.exit raises SystemExit).
    log.debug('Config file loaded from {0}'.format(config_path))
    return module
constant[
If the config.py file exists, import it as a module. If it does not exist,
call sys.exit() with a request to run oaepub configure.
]
import module[imp]
variable[config_path] assign[=] call[name[config_location], parameter[]]
<ast.Try object at 0x7da20c7961d0> | keyword[def] identifier[load_config_module] ():
literal[string]
keyword[import] identifier[imp]
identifier[config_path] = identifier[config_location] ()
keyword[try] :
identifier[config] = identifier[imp] . identifier[load_source] ( literal[string] , identifier[config_path] )
keyword[except] identifier[IOError] :
identifier[log] . identifier[critical] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[string] )
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[config_path] ))
keyword[return] identifier[config] | def load_config_module():
"""
If the config.py file exists, import it as a module. If it does not exist,
call sys.exit() with a request to run oaepub configure.
"""
import imp
config_path = config_location()
try:
config = imp.load_source('config', config_path) # depends on [control=['try'], data=[]]
except IOError:
log.critical('Config file not found. oaepub exiting...')
sys.exit("Config file not found. Please run 'oaepub configure'") # depends on [control=['except'], data=[]]
else:
log.debug('Config file loaded from {0}'.format(config_path))
return config |
def get_seqstr(config, metadata):
    """
    Extract and reformat imaging sequence(s) and variant(s) into pretty
    strings.

    Parameters
    ----------
    config : :obj:`dict`
        A dictionary with relevant information regarding sequences, sequence
        variants, phase encoding directions, and task names.
    metadata : :obj:`dict`
        The metadata for the scan.

    Returns
    -------
    seqs : :obj:`str`
        Sequence names.
    variants : :obj:`str`
        Sequence variant names.
    """
    # Abbreviations are underscore-separated; unknown ones pass through.
    abbreviations = metadata.get('ScanningSequence', '').split('_')
    seq_names = [config['seq'].get(abbr, abbr) for abbr in abbreviations]
    variant_abbrs = metadata.get('SequenceVariant', '').split('_')
    variant_names = [config['seqvar'].get(abbr, abbr) for abbr in variant_abbrs]

    seqs = list_to_str(seq_names)
    if abbreviations[0]:
        # Append the raw abbreviations joined by os.path.sep.
        seqs += ' ({0})'.format(os.path.sep.join(abbreviations))
    variants = list_to_str(variant_names)
    return seqs, variants
constant[
Extract and reformat imaging sequence(s) and variant(s) into pretty
strings.
Parameters
----------
config : :obj:`dict`
A dictionary with relevant information regarding sequences, sequence
variants, phase encoding directions, and task names.
metadata : :obj:`dict`
The metadata for the scan.
Returns
-------
seqs : :obj:`str`
Sequence names.
variants : :obj:`str`
Sequence variant names.
]
variable[seq_abbrs] assign[=] call[call[name[metadata].get, parameter[constant[ScanningSequence], constant[]]].split, parameter[constant[_]]]
variable[seqs] assign[=] <ast.ListComp object at 0x7da1b104b430>
variable[variants] assign[=] <ast.ListComp object at 0x7da1b104b4c0>
variable[seqs] assign[=] call[name[list_to_str], parameter[name[seqs]]]
if call[name[seq_abbrs]][constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b104a410>
variable[variants] assign[=] call[name[list_to_str], parameter[name[variants]]]
return[tuple[[<ast.Name object at 0x7da1b101a020>, <ast.Name object at 0x7da1b1019d20>]]] | keyword[def] identifier[get_seqstr] ( identifier[config] , identifier[metadata] ):
literal[string]
identifier[seq_abbrs] = identifier[metadata] . identifier[get] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )
identifier[seqs] =[ identifier[config] [ literal[string] ]. identifier[get] ( identifier[seq] , identifier[seq] ) keyword[for] identifier[seq] keyword[in] identifier[seq_abbrs] ]
identifier[variants] =[ identifier[config] [ literal[string] ]. identifier[get] ( identifier[var] , identifier[var] ) keyword[for] identifier[var] keyword[in] identifier[metadata] . identifier[get] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )]
identifier[seqs] = identifier[list_to_str] ( identifier[seqs] )
keyword[if] identifier[seq_abbrs] [ literal[int] ]:
identifier[seqs] += literal[string] . identifier[format] ( identifier[os] . identifier[path] . identifier[sep] . identifier[join] ( identifier[seq_abbrs] ))
identifier[variants] = identifier[list_to_str] ( identifier[variants] )
keyword[return] identifier[seqs] , identifier[variants] | def get_seqstr(config, metadata):
"""
Extract and reformat imaging sequence(s) and variant(s) into pretty
strings.
Parameters
----------
config : :obj:`dict`
A dictionary with relevant information regarding sequences, sequence
variants, phase encoding directions, and task names.
metadata : :obj:`dict`
The metadata for the scan.
Returns
-------
seqs : :obj:`str`
Sequence names.
variants : :obj:`str`
Sequence variant names.
"""
seq_abbrs = metadata.get('ScanningSequence', '').split('_')
seqs = [config['seq'].get(seq, seq) for seq in seq_abbrs]
variants = [config['seqvar'].get(var, var) for var in metadata.get('SequenceVariant', '').split('_')]
seqs = list_to_str(seqs)
if seq_abbrs[0]:
seqs += ' ({0})'.format(os.path.sep.join(seq_abbrs)) # depends on [control=['if'], data=[]]
variants = list_to_str(variants)
return (seqs, variants) |
def new_socket():
    """
    Create a new socket with OS-specific parameters

    A TCP/IPv4 socket is created with SO_REUSEADDR enabled. On
    BSD-flavored systems SO_REUSEPORT is also set when the platform
    exposes it; an ENOPROTOOPT error from that option is tolerated,
    any other error is re-raised.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # SO_REUSEPORT is not defined on every platform.
    reuseport = getattr(socket, 'SO_REUSEPORT', None)
    if reuseport is not None:
        try:
            sock.setsockopt(socket.SOL_SOCKET, reuseport, 1)
        except (OSError, socket.error) as err:
            # OSError on python 3, socket.error on python 2
            if err.errno != errno.ENOPROTOOPT:
                raise
    return sock
constant[
Create a new socket with OS-specific parameters
Try to set SO_REUSEPORT for BSD-flavored systems if it's an option.
Catches errors if not.
]
variable[new_sock] assign[=] call[name[socket].socket, parameter[name[socket].AF_INET, name[socket].SOCK_STREAM]]
call[name[new_sock].setsockopt, parameter[name[socket].SOL_SOCKET, name[socket].SO_REUSEADDR, constant[1]]]
<ast.Try object at 0x7da18dc99b10>
return[name[new_sock]] | keyword[def] identifier[new_socket] ():
literal[string]
identifier[new_sock] = identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_INET] , identifier[socket] . identifier[SOCK_STREAM] )
identifier[new_sock] . identifier[setsockopt] ( identifier[socket] . identifier[SOL_SOCKET] , identifier[socket] . identifier[SO_REUSEADDR] , literal[int] )
keyword[try] :
identifier[reuseport] = identifier[socket] . identifier[SO_REUSEPORT]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[else] :
keyword[try] :
identifier[new_sock] . identifier[setsockopt] ( identifier[socket] . identifier[SOL_SOCKET] , identifier[reuseport] , literal[int] )
keyword[except] ( identifier[OSError] , identifier[socket] . identifier[error] ) keyword[as] identifier[err] :
keyword[if] identifier[err] . identifier[errno] != identifier[errno] . identifier[ENOPROTOOPT] :
keyword[raise]
keyword[return] identifier[new_sock] | def new_socket():
"""
Create a new socket with OS-specific parameters
Try to set SO_REUSEPORT for BSD-flavored systems if it's an option.
Catches errors if not.
"""
new_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
new_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
# noinspection PyUnresolvedReferences
reuseport = socket.SO_REUSEPORT # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
else:
try:
new_sock.setsockopt(socket.SOL_SOCKET, reuseport, 1) # depends on [control=['try'], data=[]]
except (OSError, socket.error) as err:
# OSError on python 3, socket.error on python 2
if err.errno != errno.ENOPROTOOPT:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['err']]
return new_sock |
def show_configure(self):
    """
    Show the configuration window, or deiconify (un-minimise) it if it's already open.
    """
    logging.info("Displaying configuration window")
    if self.configWindow is not None:
        # Window already exists: bring it back from a minimised state.
        self.configWindow.deiconify()
        return
    self.configWindow = ConfigWindow(self)
    self.configWindow.show()
constant[
Show the configuration window, or deiconify (un-minimise) it if it's already open.
]
call[name[logging].info, parameter[constant[Displaying configuration window]]]
if compare[name[self].configWindow is constant[None]] begin[:]
name[self].configWindow assign[=] call[name[ConfigWindow], parameter[name[self]]]
call[name[self].configWindow.show, parameter[]] | keyword[def] identifier[show_configure] ( identifier[self] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] )
keyword[if] identifier[self] . identifier[configWindow] keyword[is] keyword[None] :
identifier[self] . identifier[configWindow] = identifier[ConfigWindow] ( identifier[self] )
identifier[self] . identifier[configWindow] . identifier[show] ()
keyword[else] :
identifier[self] . identifier[configWindow] . identifier[deiconify] () | def show_configure(self):
"""
Show the configuration window, or deiconify (un-minimise) it if it's already open.
"""
logging.info('Displaying configuration window')
if self.configWindow is None:
self.configWindow = ConfigWindow(self)
self.configWindow.show() # depends on [control=['if'], data=[]]
else:
self.configWindow.deiconify() |
def circle_plot(netIn, ax, nodelabels=None, linestyle='k-', nodesize=1000, cmap='Set2'):
    r'''
    Function draws "circle plot" and exports axis handles

    Nodes are placed evenly on the unit circle and each edge is drawn as
    a Bezier curve between its two endpoints.

    Parameters
    -------------
    netIn : temporal network input (graphlet or contact)
    ax : matplotlib ax handles.
    nodelabels : list
        nodes labels. List of strings. NOTE(review): accepted and
        defaulted, but not used for drawing in this implementation.
    linestyle : str
        line style
    nodesize : int
        size of nodes
    cmap : str
        matplotlib colormap

    Returns
    -------
    ax : axis handle

    Example
    -------
    >>> import teneto
    >>> import numpy
    >>> import matplotlib.pyplot as plt
    >>> G = np.zeros([6, 6])
    >>> i = [0, 0, 0, 1, 2, 3, 4]
    >>> j = [3, 4, 5, 5, 4, 5, 5]
    >>> G[i, j] = 1
    >>> fig, ax = plt.subplots(1)
    >>> ax = teneto.plot.circle_plot(G, ax)
    >>> fig.show()

    .. plot::

        import teneto
        import numpy
        import matplotlib.pyplot as plt
        G = np.zeros([6, 6])
        i = [0, 0, 0, 1, 2, 3, 4]
        j = [3, 4, 5, 5, 4, 5, 5]
        G[i, j] = 1
        fig, ax = plt.subplots(1)
        teneto.plot.circle_plot(G, ax)
        fig.show()
    '''
    # Get input type (C or G); conMat=1 additionally allows a plain
    # connectivity matrix, reported as 'M'.
    inputType = checkInput(netIn, conMat=1)
    if nodelabels is None:
        nodelabels = []
    # Convert C representation to G
    # A connectivity matrix is reduced to its nonzero (i, j) index pairs
    # and repackaged as a contact-style dict.
    if inputType == 'M':
        shape = np.shape(netIn)
        edg = np.where(np.abs(netIn) > 0)
        contacts = [tuple([edg[0][i], edg[1][i]])
                    for i in range(0, len(edg[0]))]
        netIn = {}
        netIn['contacts'] = contacts
        netIn['netshape'] = shape
    elif inputType == 'G':
        netIn = graphlet2contact(netIn)
        inputType = 'C'
    if inputType == 'C':
        # Flatten (i, j, t) contacts to node index pairs by adding
        # t * netshape[0]. NOTE(review): for t > 0 this produces indices
        # beyond the number of circle positions computed below -- confirm
        # whether temporal contacts are expected here.
        edgeList = [tuple(np.array(e[0:2]) + e[2] * netIn['netshape'][0])
                    for e in netIn['contacts']]
    elif inputType == 'M':
        edgeList = netIn['contacts']
    n = netIn['netshape'][0]
    # Get positions of node on unit circle
    posx = [math.cos((2 * math.pi * i) / n) for i in range(0, n)]
    posy = [math.sin((2 * math.pi * i) / n) for i in range(0, n)]
    # Get Bezier lines in a circle
    # (`cmap` is rebound here from a colormap name to per-node colors.)
    cmap = cm.get_cmap(cmap)(np.linspace(0, 1, n))
    for edge in edgeList:
        bvx, bvy = bezier_circle(
            (posx[edge[0]], posy[edge[0]]), (posx[edge[1]], posy[edge[1]]), 20)
        ax.plot(bvx, bvy, linestyle, zorder=0)
    for i in range(n):
        # zorder=1 keeps the node markers on top of the edge curves.
        ax.scatter(posx[i], posy[i], s=nodesize, c=cmap[i], zorder=1)
    # Remove things that make plot unpretty
    ax.set_yticklabels([])
    ax.set_xticklabels([])
    ax.set_yticks([])
    ax.set_xticks([])
    ax.set_frame_on(False)
    # make plot a square
    x0, x1 = ax.get_xlim()
    y0, y1 = ax.get_ylim()
    ax.set_aspect((x1 - x0) / (y1 - y0))
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    return ax
constant[
Function draws "circle plot" and exports axis handles
Parameters
-------------
netIn : temporal network input (graphlet or contact)
ax : matplotlib ax handles.
nodelabels : list
nodes labels. List of strings
linestyle : str
line style
nodesize : int
size of nodes
cmap : str
matplotlib colormap
Returns
-------
ax : axis handle
Example
-------
>>> import teneto
>>> import numpy
>>> import matplotlib.pyplot as plt
>>> G = np.zeros([6, 6])
>>> i = [0, 0, 0, 1, 2, 3, 4]
>>> j = [3, 4, 5, 5, 4, 5, 5]
>>> G[i, j] = 1
>>> fig, ax = plt.subplots(1)
>>> ax = teneto.plot.circle_plot(G, ax)
>>> fig.show()
.. plot::
import teneto
import numpy
import matplotlib.pyplot as plt
G = np.zeros([6, 6])
i = [0, 0, 0, 1, 2, 3, 4]
j = [3, 4, 5, 5, 4, 5, 5]
G[i, j] = 1
fig, ax = plt.subplots(1)
teneto.plot.circle_plot(G, ax)
fig.show()
]
variable[inputType] assign[=] call[name[checkInput], parameter[name[netIn]]]
if compare[name[nodelabels] is constant[None]] begin[:]
variable[nodelabels] assign[=] list[[]]
if compare[name[inputType] equal[==] constant[M]] begin[:]
variable[shape] assign[=] call[name[np].shape, parameter[name[netIn]]]
variable[edg] assign[=] call[name[np].where, parameter[compare[call[name[np].abs, parameter[name[netIn]]] greater[>] constant[0]]]]
variable[contacts] assign[=] <ast.ListComp object at 0x7da2041db4c0>
variable[netIn] assign[=] dictionary[[], []]
call[name[netIn]][constant[contacts]] assign[=] name[contacts]
call[name[netIn]][constant[netshape]] assign[=] name[shape]
if compare[name[inputType] equal[==] constant[C]] begin[:]
variable[edgeList] assign[=] <ast.ListComp object at 0x7da2041d9000>
variable[n] assign[=] call[call[name[netIn]][constant[netshape]]][constant[0]]
variable[posx] assign[=] <ast.ListComp object at 0x7da204564dc0>
variable[posy] assign[=] <ast.ListComp object at 0x7da2045668f0>
variable[cmap] assign[=] call[call[name[cm].get_cmap, parameter[name[cmap]]], parameter[call[name[np].linspace, parameter[constant[0], constant[1], name[n]]]]]
for taget[name[edge]] in starred[name[edgeList]] begin[:]
<ast.Tuple object at 0x7da204564b50> assign[=] call[name[bezier_circle], parameter[tuple[[<ast.Subscript object at 0x7da204567d30>, <ast.Subscript object at 0x7da204567be0>]], tuple[[<ast.Subscript object at 0x7da2045674c0>, <ast.Subscript object at 0x7da204567190>]], constant[20]]]
call[name[ax].plot, parameter[name[bvx], name[bvy], name[linestyle]]]
for taget[name[i]] in starred[call[name[range], parameter[name[n]]]] begin[:]
call[name[ax].scatter, parameter[call[name[posx]][name[i]], call[name[posy]][name[i]]]]
call[name[ax].set_yticklabels, parameter[list[[]]]]
call[name[ax].set_xticklabels, parameter[list[[]]]]
call[name[ax].set_yticks, parameter[list[[]]]]
call[name[ax].set_xticks, parameter[list[[]]]]
call[name[ax].set_frame_on, parameter[constant[False]]]
<ast.Tuple object at 0x7da204564670> assign[=] call[name[ax].get_xlim, parameter[]]
<ast.Tuple object at 0x7da2045677c0> assign[=] call[name[ax].get_ylim, parameter[]]
call[name[ax].set_aspect, parameter[binary_operation[binary_operation[name[x1] - name[x0]] / binary_operation[name[y1] - name[y0]]]]]
call[call[name[ax].spines][constant[top]].set_visible, parameter[constant[False]]]
call[call[name[ax].spines][constant[right]].set_visible, parameter[constant[False]]]
call[call[name[ax].spines][constant[left]].set_visible, parameter[constant[False]]]
call[call[name[ax].spines][constant[bottom]].set_visible, parameter[constant[False]]]
return[name[ax]] | keyword[def] identifier[circle_plot] ( identifier[netIn] , identifier[ax] , identifier[nodelabels] = keyword[None] , identifier[linestyle] = literal[string] , identifier[nodesize] = literal[int] , identifier[cmap] = literal[string] ):
literal[string]
identifier[inputType] = identifier[checkInput] ( identifier[netIn] , identifier[conMat] = literal[int] )
keyword[if] identifier[nodelabels] keyword[is] keyword[None] :
identifier[nodelabels] =[]
keyword[if] identifier[inputType] == literal[string] :
identifier[shape] = identifier[np] . identifier[shape] ( identifier[netIn] )
identifier[edg] = identifier[np] . identifier[where] ( identifier[np] . identifier[abs] ( identifier[netIn] )> literal[int] )
identifier[contacts] =[ identifier[tuple] ([ identifier[edg] [ literal[int] ][ identifier[i] ], identifier[edg] [ literal[int] ][ identifier[i] ]])
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[edg] [ literal[int] ]))]
identifier[netIn] ={}
identifier[netIn] [ literal[string] ]= identifier[contacts]
identifier[netIn] [ literal[string] ]= identifier[shape]
keyword[elif] identifier[inputType] == literal[string] :
identifier[netIn] = identifier[graphlet2contact] ( identifier[netIn] )
identifier[inputType] = literal[string]
keyword[if] identifier[inputType] == literal[string] :
identifier[edgeList] =[ identifier[tuple] ( identifier[np] . identifier[array] ( identifier[e] [ literal[int] : literal[int] ])+ identifier[e] [ literal[int] ]* identifier[netIn] [ literal[string] ][ literal[int] ])
keyword[for] identifier[e] keyword[in] identifier[netIn] [ literal[string] ]]
keyword[elif] identifier[inputType] == literal[string] :
identifier[edgeList] = identifier[netIn] [ literal[string] ]
identifier[n] = identifier[netIn] [ literal[string] ][ literal[int] ]
identifier[posx] =[ identifier[math] . identifier[cos] (( literal[int] * identifier[math] . identifier[pi] * identifier[i] )/ identifier[n] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[n] )]
identifier[posy] =[ identifier[math] . identifier[sin] (( literal[int] * identifier[math] . identifier[pi] * identifier[i] )/ identifier[n] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[n] )]
identifier[cmap] = identifier[cm] . identifier[get_cmap] ( identifier[cmap] )( identifier[np] . identifier[linspace] ( literal[int] , literal[int] , identifier[n] ))
keyword[for] identifier[edge] keyword[in] identifier[edgeList] :
identifier[bvx] , identifier[bvy] = identifier[bezier_circle] (
( identifier[posx] [ identifier[edge] [ literal[int] ]], identifier[posy] [ identifier[edge] [ literal[int] ]]),( identifier[posx] [ identifier[edge] [ literal[int] ]], identifier[posy] [ identifier[edge] [ literal[int] ]]), literal[int] )
identifier[ax] . identifier[plot] ( identifier[bvx] , identifier[bvy] , identifier[linestyle] , identifier[zorder] = literal[int] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] ):
identifier[ax] . identifier[scatter] ( identifier[posx] [ identifier[i] ], identifier[posy] [ identifier[i] ], identifier[s] = identifier[nodesize] , identifier[c] = identifier[cmap] [ identifier[i] ], identifier[zorder] = literal[int] )
identifier[ax] . identifier[set_yticklabels] ([])
identifier[ax] . identifier[set_xticklabels] ([])
identifier[ax] . identifier[set_yticks] ([])
identifier[ax] . identifier[set_xticks] ([])
identifier[ax] . identifier[set_frame_on] ( keyword[False] )
identifier[x0] , identifier[x1] = identifier[ax] . identifier[get_xlim] ()
identifier[y0] , identifier[y1] = identifier[ax] . identifier[get_ylim] ()
identifier[ax] . identifier[set_aspect] (( identifier[x1] - identifier[x0] )/( identifier[y1] - identifier[y0] ))
identifier[ax] . identifier[spines] [ literal[string] ]. identifier[set_visible] ( keyword[False] )
identifier[ax] . identifier[spines] [ literal[string] ]. identifier[set_visible] ( keyword[False] )
identifier[ax] . identifier[spines] [ literal[string] ]. identifier[set_visible] ( keyword[False] )
identifier[ax] . identifier[spines] [ literal[string] ]. identifier[set_visible] ( keyword[False] )
keyword[return] identifier[ax] | def circle_plot(netIn, ax, nodelabels=None, linestyle='k-', nodesize=1000, cmap='Set2'):
"""
Function draws "circle plot" and exports axis handles
Parameters
-------------
netIn : temporal network input (graphlet or contact)
ax : matplotlib ax handles.
nodelabels : list
nodes labels. List of strings
linestyle : str
line style
nodesize : int
size of nodes
cmap : str
matplotlib colormap
Returns
-------
ax : axis handle
Example
-------
>>> import teneto
>>> import numpy
>>> import matplotlib.pyplot as plt
>>> G = np.zeros([6, 6])
>>> i = [0, 0, 0, 1, 2, 3, 4]
>>> j = [3, 4, 5, 5, 4, 5, 5]
>>> G[i, j] = 1
>>> fig, ax = plt.subplots(1)
>>> ax = teneto.plot.circle_plot(G, ax)
>>> fig.show()
.. plot::
import teneto
import numpy
import matplotlib.pyplot as plt
G = np.zeros([6, 6])
i = [0, 0, 0, 1, 2, 3, 4]
j = [3, 4, 5, 5, 4, 5, 5]
G[i, j] = 1
fig, ax = plt.subplots(1)
teneto.plot.circle_plot(G, ax)
fig.show()
"""
# Get input type (C or G)
inputType = checkInput(netIn, conMat=1)
if nodelabels is None:
nodelabels = [] # depends on [control=['if'], data=['nodelabels']]
# Convert C representation to G
if inputType == 'M':
shape = np.shape(netIn)
edg = np.where(np.abs(netIn) > 0)
contacts = [tuple([edg[0][i], edg[1][i]]) for i in range(0, len(edg[0]))]
netIn = {}
netIn['contacts'] = contacts
netIn['netshape'] = shape # depends on [control=['if'], data=[]]
elif inputType == 'G':
netIn = graphlet2contact(netIn)
inputType = 'C' # depends on [control=['if'], data=['inputType']]
if inputType == 'C':
edgeList = [tuple(np.array(e[0:2]) + e[2] * netIn['netshape'][0]) for e in netIn['contacts']] # depends on [control=['if'], data=[]]
elif inputType == 'M':
edgeList = netIn['contacts'] # depends on [control=['if'], data=[]]
n = netIn['netshape'][0]
# Get positions of node on unit circle
posx = [math.cos(2 * math.pi * i / n) for i in range(0, n)]
posy = [math.sin(2 * math.pi * i / n) for i in range(0, n)]
# Get Bezier lines in a circle
cmap = cm.get_cmap(cmap)(np.linspace(0, 1, n))
for edge in edgeList:
(bvx, bvy) = bezier_circle((posx[edge[0]], posy[edge[0]]), (posx[edge[1]], posy[edge[1]]), 20)
ax.plot(bvx, bvy, linestyle, zorder=0) # depends on [control=['for'], data=['edge']]
for i in range(n):
ax.scatter(posx[i], posy[i], s=nodesize, c=cmap[i], zorder=1) # depends on [control=['for'], data=['i']]
# Remove things that make plot unpretty
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_xticks([])
ax.set_frame_on(False)
# make plot a square
(x0, x1) = ax.get_xlim()
(y0, y1) = ax.get_ylim()
ax.set_aspect((x1 - x0) / (y1 - y0))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
return ax |
def _zmaxFindStart(z,Ez,pot):
"""
NAME:
_zmaxFindStart
PURPOSE:
Find adequate end point to solve for zmax
INPUT:
z - height
Ez - vertical energy
pot - potential
OUTPUT:
zend
HISTORY:
2012-06-01 - Written - Bovy (IAS)
"""
if z == 0.: ztry= 0.00001
else: ztry= 2.*nu.fabs(z)
while (Ez-potentialVertical(ztry,pot)) > 0.:
ztry*= 2.
if ztry > 100.: #pragma: no cover
raise OverflowError
return ztry | def function[_zmaxFindStart, parameter[z, Ez, pot]]:
constant[
NAME:
_zmaxFindStart
PURPOSE:
Find adequate end point to solve for zmax
INPUT:
z - height
Ez - vertical energy
pot - potential
OUTPUT:
zend
HISTORY:
2012-06-01 - Written - Bovy (IAS)
]
if compare[name[z] equal[==] constant[0.0]] begin[:]
variable[ztry] assign[=] constant[1e-05]
while compare[binary_operation[name[Ez] - call[name[potentialVertical], parameter[name[ztry], name[pot]]]] greater[>] constant[0.0]] begin[:]
<ast.AugAssign object at 0x7da204566fb0>
if compare[name[ztry] greater[>] constant[100.0]] begin[:]
<ast.Raise object at 0x7da204566c80>
return[name[ztry]] | keyword[def] identifier[_zmaxFindStart] ( identifier[z] , identifier[Ez] , identifier[pot] ):
literal[string]
keyword[if] identifier[z] == literal[int] : identifier[ztry] = literal[int]
keyword[else] : identifier[ztry] = literal[int] * identifier[nu] . identifier[fabs] ( identifier[z] )
keyword[while] ( identifier[Ez] - identifier[potentialVertical] ( identifier[ztry] , identifier[pot] ))> literal[int] :
identifier[ztry] *= literal[int]
keyword[if] identifier[ztry] > literal[int] :
keyword[raise] identifier[OverflowError]
keyword[return] identifier[ztry] | def _zmaxFindStart(z, Ez, pot):
"""
NAME:
_zmaxFindStart
PURPOSE:
Find adequate end point to solve for zmax
INPUT:
z - height
Ez - vertical energy
pot - potential
OUTPUT:
zend
HISTORY:
2012-06-01 - Written - Bovy (IAS)
"""
if z == 0.0:
ztry = 1e-05 # depends on [control=['if'], data=[]]
else:
ztry = 2.0 * nu.fabs(z)
while Ez - potentialVertical(ztry, pot) > 0.0:
ztry *= 2.0
if ztry > 100.0: #pragma: no cover
raise OverflowError # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return ztry |
def head(source, count: int = 5):
"""
Displays a specified number of elements in a source object of many
different possible types.
:param source:
DataFrames will show *count* rows of that DataFrame. A list, tuple or
other iterable, will show the first *count* rows. Dictionaries will
show *count* keys from the dictionary, which will be randomly selected
unless you are using an OrderedDict. Strings will show the first
*count* characters.
:param count:
The number of elements to show from the source.
"""
r = _get_report()
r.append_body(render_texts.head(source, count=count))
r.stdout_interceptor.write_source('[ADDED] Head\n') | def function[head, parameter[source, count]]:
constant[
Displays a specified number of elements in a source object of many
different possible types.
:param source:
DataFrames will show *count* rows of that DataFrame. A list, tuple or
other iterable, will show the first *count* rows. Dictionaries will
show *count* keys from the dictionary, which will be randomly selected
unless you are using an OrderedDict. Strings will show the first
*count* characters.
:param count:
The number of elements to show from the source.
]
variable[r] assign[=] call[name[_get_report], parameter[]]
call[name[r].append_body, parameter[call[name[render_texts].head, parameter[name[source]]]]]
call[name[r].stdout_interceptor.write_source, parameter[constant[[ADDED] Head
]]] | keyword[def] identifier[head] ( identifier[source] , identifier[count] : identifier[int] = literal[int] ):
literal[string]
identifier[r] = identifier[_get_report] ()
identifier[r] . identifier[append_body] ( identifier[render_texts] . identifier[head] ( identifier[source] , identifier[count] = identifier[count] ))
identifier[r] . identifier[stdout_interceptor] . identifier[write_source] ( literal[string] ) | def head(source, count: int=5):
"""
Displays a specified number of elements in a source object of many
different possible types.
:param source:
DataFrames will show *count* rows of that DataFrame. A list, tuple or
other iterable, will show the first *count* rows. Dictionaries will
show *count* keys from the dictionary, which will be randomly selected
unless you are using an OrderedDict. Strings will show the first
*count* characters.
:param count:
The number of elements to show from the source.
"""
r = _get_report()
r.append_body(render_texts.head(source, count=count))
r.stdout_interceptor.write_source('[ADDED] Head\n') |
def eval_str(s: str, ctx: compiler.CompilerContext, module: types.ModuleType, eof: Any):
"""Evaluate the forms in a string into a Python module AST node."""
last = eof
for form in reader.read_str(s, resolver=runtime.resolve_alias, eof=eof):
last = compiler.compile_and_exec_form(form, ctx, module)
return last | def function[eval_str, parameter[s, ctx, module, eof]]:
constant[Evaluate the forms in a string into a Python module AST node.]
variable[last] assign[=] name[eof]
for taget[name[form]] in starred[call[name[reader].read_str, parameter[name[s]]]] begin[:]
variable[last] assign[=] call[name[compiler].compile_and_exec_form, parameter[name[form], name[ctx], name[module]]]
return[name[last]] | keyword[def] identifier[eval_str] ( identifier[s] : identifier[str] , identifier[ctx] : identifier[compiler] . identifier[CompilerContext] , identifier[module] : identifier[types] . identifier[ModuleType] , identifier[eof] : identifier[Any] ):
literal[string]
identifier[last] = identifier[eof]
keyword[for] identifier[form] keyword[in] identifier[reader] . identifier[read_str] ( identifier[s] , identifier[resolver] = identifier[runtime] . identifier[resolve_alias] , identifier[eof] = identifier[eof] ):
identifier[last] = identifier[compiler] . identifier[compile_and_exec_form] ( identifier[form] , identifier[ctx] , identifier[module] )
keyword[return] identifier[last] | def eval_str(s: str, ctx: compiler.CompilerContext, module: types.ModuleType, eof: Any):
"""Evaluate the forms in a string into a Python module AST node."""
last = eof
for form in reader.read_str(s, resolver=runtime.resolve_alias, eof=eof):
last = compiler.compile_and_exec_form(form, ctx, module) # depends on [control=['for'], data=['form']]
return last |
def _find_terminator(self, iterator):
"""The terminator might have some additional newlines before it.
There is at least one application that sends additional newlines
before headers (the python setuptools package).
"""
for line in iterator:
if not line:
break
line = line.strip()
if line:
return line
return b"" | def function[_find_terminator, parameter[self, iterator]]:
constant[The terminator might have some additional newlines before it.
There is at least one application that sends additional newlines
before headers (the python setuptools package).
]
for taget[name[line]] in starred[name[iterator]] begin[:]
if <ast.UnaryOp object at 0x7da18f810ac0> begin[:]
break
variable[line] assign[=] call[name[line].strip, parameter[]]
if name[line] begin[:]
return[name[line]]
return[constant[b'']] | keyword[def] identifier[_find_terminator] ( identifier[self] , identifier[iterator] ):
literal[string]
keyword[for] identifier[line] keyword[in] identifier[iterator] :
keyword[if] keyword[not] identifier[line] :
keyword[break]
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] identifier[line] :
keyword[return] identifier[line]
keyword[return] literal[string] | def _find_terminator(self, iterator):
"""The terminator might have some additional newlines before it.
There is at least one application that sends additional newlines
before headers (the python setuptools package).
"""
for line in iterator:
if not line:
break # depends on [control=['if'], data=[]]
line = line.strip()
if line:
return line # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return b'' |
def create_body_index(xml_string):
""" Extract a name to index dictionary from 6dof settings xml """
xml = ET.fromstring(xml_string)
body_to_index = {}
for index, body in enumerate(xml.findall("*/Body/Name")):
body_to_index[body.text.strip()] = index
return body_to_index | def function[create_body_index, parameter[xml_string]]:
constant[ Extract a name to index dictionary from 6dof settings xml ]
variable[xml] assign[=] call[name[ET].fromstring, parameter[name[xml_string]]]
variable[body_to_index] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b00dab90>, <ast.Name object at 0x7da1b00d87f0>]]] in starred[call[name[enumerate], parameter[call[name[xml].findall, parameter[constant[*/Body/Name]]]]]] begin[:]
call[name[body_to_index]][call[name[body].text.strip, parameter[]]] assign[=] name[index]
return[name[body_to_index]] | keyword[def] identifier[create_body_index] ( identifier[xml_string] ):
literal[string]
identifier[xml] = identifier[ET] . identifier[fromstring] ( identifier[xml_string] )
identifier[body_to_index] ={}
keyword[for] identifier[index] , identifier[body] keyword[in] identifier[enumerate] ( identifier[xml] . identifier[findall] ( literal[string] )):
identifier[body_to_index] [ identifier[body] . identifier[text] . identifier[strip] ()]= identifier[index]
keyword[return] identifier[body_to_index] | def create_body_index(xml_string):
""" Extract a name to index dictionary from 6dof settings xml """
xml = ET.fromstring(xml_string)
body_to_index = {}
for (index, body) in enumerate(xml.findall('*/Body/Name')):
body_to_index[body.text.strip()] = index # depends on [control=['for'], data=[]]
return body_to_index |
def runTask(self, task, timeout=None):
"""Run a child task to completion. Returns the result of
the child task.
"""
# Initialize the task.
task.initialize(self)
# Start the task.
task.start()
# Lets other threads run
time.sleep(0)
# Wait for it to finish.
res = task.wait(timeout=timeout)
# Now we're done
return res | def function[runTask, parameter[self, task, timeout]]:
constant[Run a child task to completion. Returns the result of
the child task.
]
call[name[task].initialize, parameter[name[self]]]
call[name[task].start, parameter[]]
call[name[time].sleep, parameter[constant[0]]]
variable[res] assign[=] call[name[task].wait, parameter[]]
return[name[res]] | keyword[def] identifier[runTask] ( identifier[self] , identifier[task] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[task] . identifier[initialize] ( identifier[self] )
identifier[task] . identifier[start] ()
identifier[time] . identifier[sleep] ( literal[int] )
identifier[res] = identifier[task] . identifier[wait] ( identifier[timeout] = identifier[timeout] )
keyword[return] identifier[res] | def runTask(self, task, timeout=None):
"""Run a child task to completion. Returns the result of
the child task.
"""
# Initialize the task.
task.initialize(self)
# Start the task.
task.start()
# Lets other threads run
time.sleep(0)
# Wait for it to finish.
res = task.wait(timeout=timeout)
# Now we're done
return res |
def trace(fun, *a, **k):
""" define a tracer for a rule function
for log and statistic purposes """
@wraps(fun)
def tracer(*a, **k):
ret = fun(*a, **k)
print('trace:fun: %s\n ret=%s\n a=%s\nk%s\n' %
(str(fun), str(ret), str(a), str(k)))
return ret
return tracer | def function[trace, parameter[fun]]:
constant[ define a tracer for a rule function
for log and statistic purposes ]
def function[tracer, parameter[]]:
variable[ret] assign[=] call[name[fun], parameter[<ast.Starred object at 0x7da1b28d81f0>]]
call[name[print], parameter[binary_operation[constant[trace:fun: %s
ret=%s
a=%s
k%s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b28d8490>, <ast.Call object at 0x7da1b28d8400>, <ast.Call object at 0x7da1b28b3b80>, <ast.Call object at 0x7da1b28b3be0>]]]]]
return[name[ret]]
return[name[tracer]] | keyword[def] identifier[trace] ( identifier[fun] ,* identifier[a] ,** identifier[k] ):
literal[string]
@ identifier[wraps] ( identifier[fun] )
keyword[def] identifier[tracer] (* identifier[a] ,** identifier[k] ):
identifier[ret] = identifier[fun] (* identifier[a] ,** identifier[k] )
identifier[print] ( literal[string] %
( identifier[str] ( identifier[fun] ), identifier[str] ( identifier[ret] ), identifier[str] ( identifier[a] ), identifier[str] ( identifier[k] )))
keyword[return] identifier[ret]
keyword[return] identifier[tracer] | def trace(fun, *a, **k):
""" define a tracer for a rule function
for log and statistic purposes """
@wraps(fun)
def tracer(*a, **k):
ret = fun(*a, **k)
print('trace:fun: %s\n ret=%s\n a=%s\nk%s\n' % (str(fun), str(ret), str(a), str(k)))
return ret
return tracer |
def ignore(self, matcher):
'''
Unblock and ignore the matched events, if any.
'''
events = self.eventtree.findAndRemove(matcher)
for e in events:
self.queue.unblock(e)
e.canignore = True | def function[ignore, parameter[self, matcher]]:
constant[
Unblock and ignore the matched events, if any.
]
variable[events] assign[=] call[name[self].eventtree.findAndRemove, parameter[name[matcher]]]
for taget[name[e]] in starred[name[events]] begin[:]
call[name[self].queue.unblock, parameter[name[e]]]
name[e].canignore assign[=] constant[True] | keyword[def] identifier[ignore] ( identifier[self] , identifier[matcher] ):
literal[string]
identifier[events] = identifier[self] . identifier[eventtree] . identifier[findAndRemove] ( identifier[matcher] )
keyword[for] identifier[e] keyword[in] identifier[events] :
identifier[self] . identifier[queue] . identifier[unblock] ( identifier[e] )
identifier[e] . identifier[canignore] = keyword[True] | def ignore(self, matcher):
"""
Unblock and ignore the matched events, if any.
"""
events = self.eventtree.findAndRemove(matcher)
for e in events:
self.queue.unblock(e)
e.canignore = True # depends on [control=['for'], data=['e']] |
def process_event(event):
"""Pretty prints events.
Prints all events that occur with two spaces between each new
conversation and a single space between turns of a conversation.
Args:
event(event.Event): The current event to process.
"""
if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
print()
print(event)
if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED and
event.args and not event.args['with_follow_on_turn']):
print()
if event.type == EventType.ON_DEVICE_ACTION:
for command, params in event.actions:
print('Do command', command, 'with params', str(params)) | def function[process_event, parameter[event]]:
constant[Pretty prints events.
Prints all events that occur with two spaces between each new
conversation and a single space between turns of a conversation.
Args:
event(event.Event): The current event to process.
]
if compare[name[event].type equal[==] name[EventType].ON_CONVERSATION_TURN_STARTED] begin[:]
call[name[print], parameter[]]
call[name[print], parameter[name[event]]]
if <ast.BoolOp object at 0x7da2041d81c0> begin[:]
call[name[print], parameter[]]
if compare[name[event].type equal[==] name[EventType].ON_DEVICE_ACTION] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2041d8610>, <ast.Name object at 0x7da2041d8190>]]] in starred[name[event].actions] begin[:]
call[name[print], parameter[constant[Do command], name[command], constant[with params], call[name[str], parameter[name[params]]]]] | keyword[def] identifier[process_event] ( identifier[event] ):
literal[string]
keyword[if] identifier[event] . identifier[type] == identifier[EventType] . identifier[ON_CONVERSATION_TURN_STARTED] :
identifier[print] ()
identifier[print] ( identifier[event] )
keyword[if] ( identifier[event] . identifier[type] == identifier[EventType] . identifier[ON_CONVERSATION_TURN_FINISHED] keyword[and]
identifier[event] . identifier[args] keyword[and] keyword[not] identifier[event] . identifier[args] [ literal[string] ]):
identifier[print] ()
keyword[if] identifier[event] . identifier[type] == identifier[EventType] . identifier[ON_DEVICE_ACTION] :
keyword[for] identifier[command] , identifier[params] keyword[in] identifier[event] . identifier[actions] :
identifier[print] ( literal[string] , identifier[command] , literal[string] , identifier[str] ( identifier[params] )) | def process_event(event):
"""Pretty prints events.
Prints all events that occur with two spaces between each new
conversation and a single space between turns of a conversation.
Args:
event(event.Event): The current event to process.
"""
if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
print() # depends on [control=['if'], data=[]]
print(event)
if event.type == EventType.ON_CONVERSATION_TURN_FINISHED and event.args and (not event.args['with_follow_on_turn']):
print() # depends on [control=['if'], data=[]]
if event.type == EventType.ON_DEVICE_ACTION:
for (command, params) in event.actions:
print('Do command', command, 'with params', str(params)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
def send(self, msg):
'''Send the message. If message is an iterable, then send
all the messages.'''
app = self.app or current_app
mailer = app.extensions['marrowmailer']
mailer.start()
if not hasattr(msg, '__iter__'):
result = mailer.send(msg)
else:
result = map(lambda message: mailer.send(message), msg)
mailer.stop()
return result | def function[send, parameter[self, msg]]:
constant[Send the message. If message is an iterable, then send
all the messages.]
variable[app] assign[=] <ast.BoolOp object at 0x7da1b2350940>
variable[mailer] assign[=] call[name[app].extensions][constant[marrowmailer]]
call[name[mailer].start, parameter[]]
if <ast.UnaryOp object at 0x7da1b2350970> begin[:]
variable[result] assign[=] call[name[mailer].send, parameter[name[msg]]]
call[name[mailer].stop, parameter[]]
return[name[result]] | keyword[def] identifier[send] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[app] = identifier[self] . identifier[app] keyword[or] identifier[current_app]
identifier[mailer] = identifier[app] . identifier[extensions] [ literal[string] ]
identifier[mailer] . identifier[start] ()
keyword[if] keyword[not] identifier[hasattr] ( identifier[msg] , literal[string] ):
identifier[result] = identifier[mailer] . identifier[send] ( identifier[msg] )
keyword[else] :
identifier[result] = identifier[map] ( keyword[lambda] identifier[message] : identifier[mailer] . identifier[send] ( identifier[message] ), identifier[msg] )
identifier[mailer] . identifier[stop] ()
keyword[return] identifier[result] | def send(self, msg):
"""Send the message. If message is an iterable, then send
all the messages."""
app = self.app or current_app
mailer = app.extensions['marrowmailer']
mailer.start()
if not hasattr(msg, '__iter__'):
result = mailer.send(msg) # depends on [control=['if'], data=[]]
else:
result = map(lambda message: mailer.send(message), msg)
mailer.stop()
return result |
def add_boolean(self, b):
"""
Add a boolean value to the stream.
:param bool b: boolean value to add
"""
if b:
self.packet.write(one_byte)
else:
self.packet.write(zero_byte)
return self | def function[add_boolean, parameter[self, b]]:
constant[
Add a boolean value to the stream.
:param bool b: boolean value to add
]
if name[b] begin[:]
call[name[self].packet.write, parameter[name[one_byte]]]
return[name[self]] | keyword[def] identifier[add_boolean] ( identifier[self] , identifier[b] ):
literal[string]
keyword[if] identifier[b] :
identifier[self] . identifier[packet] . identifier[write] ( identifier[one_byte] )
keyword[else] :
identifier[self] . identifier[packet] . identifier[write] ( identifier[zero_byte] )
keyword[return] identifier[self] | def add_boolean(self, b):
"""
Add a boolean value to the stream.
:param bool b: boolean value to add
"""
if b:
self.packet.write(one_byte) # depends on [control=['if'], data=[]]
else:
self.packet.write(zero_byte)
return self |
def GET_blockchain_num_subdomains( self, path_info, blockchain_name ):
"""
Handle GET /blockchains/:blockchainID/subdomains_count
Takes `all=true` to include expired names
Reply with the number of names on this blockchain
"""
if blockchain_name != 'bitcoin':
# not supported
self._reply_json({'error': 'Unsupported blockchain'}, status_code=404)
return
blockstackd_url = get_blockstackd_url()
num_names = blockstackd_client.get_num_subdomains(hostport=blockstackd_url)
if json_is_error(num_names):
if json_is_exception(num_names):
status_code = 406
else:
status_code = 404
self._reply_json({'error': num_names['error']}, status_code=status_code)
return
self._reply_json({'names_count': num_names})
return | def function[GET_blockchain_num_subdomains, parameter[self, path_info, blockchain_name]]:
constant[
Handle GET /blockchains/:blockchainID/subdomains_count
Takes `all=true` to include expired names
Reply with the number of names on this blockchain
]
if compare[name[blockchain_name] not_equal[!=] constant[bitcoin]] begin[:]
call[name[self]._reply_json, parameter[dictionary[[<ast.Constant object at 0x7da20c6a8f40>], [<ast.Constant object at 0x7da20c6aba90>]]]]
return[None]
variable[blockstackd_url] assign[=] call[name[get_blockstackd_url], parameter[]]
variable[num_names] assign[=] call[name[blockstackd_client].get_num_subdomains, parameter[]]
if call[name[json_is_error], parameter[name[num_names]]] begin[:]
if call[name[json_is_exception], parameter[name[num_names]]] begin[:]
variable[status_code] assign[=] constant[406]
call[name[self]._reply_json, parameter[dictionary[[<ast.Constant object at 0x7da20c6ab340>], [<ast.Subscript object at 0x7da20c6a9ff0>]]]]
return[None]
call[name[self]._reply_json, parameter[dictionary[[<ast.Constant object at 0x7da20c6aa8c0>], [<ast.Name object at 0x7da20c6abf40>]]]]
return[None] | keyword[def] identifier[GET_blockchain_num_subdomains] ( identifier[self] , identifier[path_info] , identifier[blockchain_name] ):
literal[string]
keyword[if] identifier[blockchain_name] != literal[string] :
identifier[self] . identifier[_reply_json] ({ literal[string] : literal[string] }, identifier[status_code] = literal[int] )
keyword[return]
identifier[blockstackd_url] = identifier[get_blockstackd_url] ()
identifier[num_names] = identifier[blockstackd_client] . identifier[get_num_subdomains] ( identifier[hostport] = identifier[blockstackd_url] )
keyword[if] identifier[json_is_error] ( identifier[num_names] ):
keyword[if] identifier[json_is_exception] ( identifier[num_names] ):
identifier[status_code] = literal[int]
keyword[else] :
identifier[status_code] = literal[int]
identifier[self] . identifier[_reply_json] ({ literal[string] : identifier[num_names] [ literal[string] ]}, identifier[status_code] = identifier[status_code] )
keyword[return]
identifier[self] . identifier[_reply_json] ({ literal[string] : identifier[num_names] })
keyword[return] | def GET_blockchain_num_subdomains(self, path_info, blockchain_name):
"""
Handle GET /blockchains/:blockchainID/subdomains_count
Takes `all=true` to include expired names
Reply with the number of names on this blockchain
"""
if blockchain_name != 'bitcoin':
# not supported
self._reply_json({'error': 'Unsupported blockchain'}, status_code=404)
return # depends on [control=['if'], data=[]]
blockstackd_url = get_blockstackd_url()
num_names = blockstackd_client.get_num_subdomains(hostport=blockstackd_url)
if json_is_error(num_names):
if json_is_exception(num_names):
status_code = 406 # depends on [control=['if'], data=[]]
else:
status_code = 404
self._reply_json({'error': num_names['error']}, status_code=status_code)
return # depends on [control=['if'], data=[]]
self._reply_json({'names_count': num_names})
return |
def kcover(I,J,c,k):
"""kcover -- minimize the number of uncovered customers from k facilities.
Parameters:
- I: set of customers
- J: set of potential facilities
- c[i,j]: cost of servicing customer i from facility j
- k: number of facilities to be used
Returns a model, ready to be solved.
"""
model = Model("k-center")
z,y,x = {},{},{}
for i in I:
z[i] = model.addVar(vtype="B", name="z(%s)"%i, obj=1)
for j in J:
y[j] = model.addVar(vtype="B", name="y(%s)"%j)
for i in I:
x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j))
for i in I:
model.addCons(quicksum(x[i,j] for j in J) + z[i] == 1, "Assign(%s)"%i)
for j in J:
model.addCons(x[i,j] <= y[j], "Strong(%s,%s)"%(i,j))
model.addCons(sum(y[j] for j in J) == k, "k_center")
model.data = x,y,z
return model | def function[kcover, parameter[I, J, c, k]]:
constant[kcover -- minimize the number of uncovered customers from k facilities.
Parameters:
- I: set of customers
- J: set of potential facilities
- c[i,j]: cost of servicing customer i from facility j
- k: number of facilities to be used
Returns a model, ready to be solved.
]
variable[model] assign[=] call[name[Model], parameter[constant[k-center]]]
<ast.Tuple object at 0x7da18f00c460> assign[=] tuple[[<ast.Dict object at 0x7da18f00d8a0>, <ast.Dict object at 0x7da18f00e830>, <ast.Dict object at 0x7da18f00dc60>]]
for taget[name[i]] in starred[name[I]] begin[:]
call[name[z]][name[i]] assign[=] call[name[model].addVar, parameter[]]
for taget[name[j]] in starred[name[J]] begin[:]
call[name[y]][name[j]] assign[=] call[name[model].addVar, parameter[]]
for taget[name[i]] in starred[name[I]] begin[:]
call[name[x]][tuple[[<ast.Name object at 0x7da1b17f6380>, <ast.Name object at 0x7da1b17f51b0>]]] assign[=] call[name[model].addVar, parameter[]]
for taget[name[i]] in starred[name[I]] begin[:]
call[name[model].addCons, parameter[compare[binary_operation[call[name[quicksum], parameter[<ast.GeneratorExp object at 0x7da1b17f6b30>]] + call[name[z]][name[i]]] equal[==] constant[1]], binary_operation[constant[Assign(%s)] <ast.Mod object at 0x7da2590d6920> name[i]]]]
for taget[name[j]] in starred[name[J]] begin[:]
call[name[model].addCons, parameter[compare[call[name[x]][tuple[[<ast.Name object at 0x7da1b17f7b80>, <ast.Name object at 0x7da1b17f5570>]]] less_or_equal[<=] call[name[y]][name[j]]], binary_operation[constant[Strong(%s,%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b17f4dc0>, <ast.Name object at 0x7da1b17f7910>]]]]]
call[name[model].addCons, parameter[compare[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b17f71c0>]] equal[==] name[k]], constant[k_center]]]
name[model].data assign[=] tuple[[<ast.Name object at 0x7da1b17f5390>, <ast.Name object at 0x7da1b17f5360>, <ast.Name object at 0x7da1b17f5a50>]]
return[name[model]] | keyword[def] identifier[kcover] ( identifier[I] , identifier[J] , identifier[c] , identifier[k] ):
literal[string]
identifier[model] = identifier[Model] ( literal[string] )
identifier[z] , identifier[y] , identifier[x] ={},{},{}
keyword[for] identifier[i] keyword[in] identifier[I] :
identifier[z] [ identifier[i] ]= identifier[model] . identifier[addVar] ( identifier[vtype] = literal[string] , identifier[name] = literal[string] % identifier[i] , identifier[obj] = literal[int] )
keyword[for] identifier[j] keyword[in] identifier[J] :
identifier[y] [ identifier[j] ]= identifier[model] . identifier[addVar] ( identifier[vtype] = literal[string] , identifier[name] = literal[string] % identifier[j] )
keyword[for] identifier[i] keyword[in] identifier[I] :
identifier[x] [ identifier[i] , identifier[j] ]= identifier[model] . identifier[addVar] ( identifier[vtype] = literal[string] , identifier[name] = literal[string] %( identifier[i] , identifier[j] ))
keyword[for] identifier[i] keyword[in] identifier[I] :
identifier[model] . identifier[addCons] ( identifier[quicksum] ( identifier[x] [ identifier[i] , identifier[j] ] keyword[for] identifier[j] keyword[in] identifier[J] )+ identifier[z] [ identifier[i] ]== literal[int] , literal[string] % identifier[i] )
keyword[for] identifier[j] keyword[in] identifier[J] :
identifier[model] . identifier[addCons] ( identifier[x] [ identifier[i] , identifier[j] ]<= identifier[y] [ identifier[j] ], literal[string] %( identifier[i] , identifier[j] ))
identifier[model] . identifier[addCons] ( identifier[sum] ( identifier[y] [ identifier[j] ] keyword[for] identifier[j] keyword[in] identifier[J] )== identifier[k] , literal[string] )
identifier[model] . identifier[data] = identifier[x] , identifier[y] , identifier[z]
keyword[return] identifier[model] | def kcover(I, J, c, k):
"""kcover -- minimize the number of uncovered customers from k facilities.
Parameters:
- I: set of customers
- J: set of potential facilities
- c[i,j]: cost of servicing customer i from facility j
- k: number of facilities to be used
Returns a model, ready to be solved.
"""
model = Model('k-center')
(z, y, x) = ({}, {}, {})
for i in I:
z[i] = model.addVar(vtype='B', name='z(%s)' % i, obj=1) # depends on [control=['for'], data=['i']]
for j in J:
y[j] = model.addVar(vtype='B', name='y(%s)' % j)
for i in I:
x[i, j] = model.addVar(vtype='B', name='x(%s,%s)' % (i, j)) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['j']]
for i in I:
model.addCons(quicksum((x[i, j] for j in J)) + z[i] == 1, 'Assign(%s)' % i)
for j in J:
model.addCons(x[i, j] <= y[j], 'Strong(%s,%s)' % (i, j)) # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
model.addCons(sum((y[j] for j in J)) == k, 'k_center')
model.data = (x, y, z)
return model |
def hide_routemap_holder_route_map_content_match_extcommunity_extcommunity_num(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
match = ET.SubElement(content, "match")
extcommunity = ET.SubElement(match, "extcommunity")
extcommunity_num = ET.SubElement(extcommunity, "extcommunity-num")
extcommunity_num.text = kwargs.pop('extcommunity_num')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[hide_routemap_holder_route_map_content_match_extcommunity_extcommunity_num, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[hide_routemap_holder] assign[=] call[name[ET].SubElement, parameter[name[config], constant[hide-routemap-holder]]]
variable[route_map] assign[=] call[name[ET].SubElement, parameter[name[hide_routemap_holder], constant[route-map]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[route_map], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[action_rm_key] assign[=] call[name[ET].SubElement, parameter[name[route_map], constant[action-rm]]]
name[action_rm_key].text assign[=] call[name[kwargs].pop, parameter[constant[action_rm]]]
variable[instance_key] assign[=] call[name[ET].SubElement, parameter[name[route_map], constant[instance]]]
name[instance_key].text assign[=] call[name[kwargs].pop, parameter[constant[instance]]]
variable[content] assign[=] call[name[ET].SubElement, parameter[name[route_map], constant[content]]]
variable[match] assign[=] call[name[ET].SubElement, parameter[name[content], constant[match]]]
variable[extcommunity] assign[=] call[name[ET].SubElement, parameter[name[match], constant[extcommunity]]]
variable[extcommunity_num] assign[=] call[name[ET].SubElement, parameter[name[extcommunity], constant[extcommunity-num]]]
name[extcommunity_num].text assign[=] call[name[kwargs].pop, parameter[constant[extcommunity_num]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[hide_routemap_holder_route_map_content_match_extcommunity_extcommunity_num] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[hide_routemap_holder] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[route_map] = identifier[ET] . identifier[SubElement] ( identifier[hide_routemap_holder] , literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[route_map] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[action_rm_key] = identifier[ET] . identifier[SubElement] ( identifier[route_map] , literal[string] )
identifier[action_rm_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[instance_key] = identifier[ET] . identifier[SubElement] ( identifier[route_map] , literal[string] )
identifier[instance_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[content] = identifier[ET] . identifier[SubElement] ( identifier[route_map] , literal[string] )
identifier[match] = identifier[ET] . identifier[SubElement] ( identifier[content] , literal[string] )
identifier[extcommunity] = identifier[ET] . identifier[SubElement] ( identifier[match] , literal[string] )
identifier[extcommunity_num] = identifier[ET] . identifier[SubElement] ( identifier[extcommunity] , literal[string] )
identifier[extcommunity_num] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def hide_routemap_holder_route_map_content_match_extcommunity_extcommunity_num(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
hide_routemap_holder = ET.SubElement(config, 'hide-routemap-holder', xmlns='urn:brocade.com:mgmt:brocade-ip-policy')
route_map = ET.SubElement(hide_routemap_holder, 'route-map')
name_key = ET.SubElement(route_map, 'name')
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, 'action-rm')
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, 'instance')
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, 'content')
match = ET.SubElement(content, 'match')
extcommunity = ET.SubElement(match, 'extcommunity')
extcommunity_num = ET.SubElement(extcommunity, 'extcommunity-num')
extcommunity_num.text = kwargs.pop('extcommunity_num')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def cotangent(x, null=(-np.inf, np.inf), rtol=default_rtol, atol=default_atol):
'''
cotangent(x) is equivalent to cot(x) except that it also works on sparse arrays.
The optional argument null (default, (-numpy.inf, numpy.inf)) may be specified to indicate what
value(s) should be assigned when x == 0 or pi. If only one number is given, then it is used for
both values; otherwise the first value corresponds to 0 and the second to pi. A value of x is
considered to be equal to one of these valids based on numpy.isclose. The optional arguments
rtol and atol are passed along to isclose. If null is None, then no replacement is performed.
'''
if sps.issparse(x): x = x.toarray()
else: x = np.asarray(x)
if rtol is None: rtol = default_rtol
if atol is None: atol = default_atol
try: (nln,nlp) = null
except Exception: (nln,nlp) = (null,null)
x = np.mod(x + hpi, tau) - hpi
ii = None if nln is None else np.where(np.isclose(x, 0, rtol=rtol, atol=atol))
jj = None if nlp is None else np.where(np.isclose(x, pi, rtol=rtol, atol=atol))
x = np.tan(x)
if ii: x[ii] = 1
if jj: x[jj] = 1
x = 1.0 / x
if ii: x[ii] = nln
if jj: x[jj] = nlp
return x | def function[cotangent, parameter[x, null, rtol, atol]]:
constant[
cotangent(x) is equivalent to cot(x) except that it also works on sparse arrays.
The optional argument null (default, (-numpy.inf, numpy.inf)) may be specified to indicate what
value(s) should be assigned when x == 0 or pi. If only one number is given, then it is used for
both values; otherwise the first value corresponds to 0 and the second to pi. A value of x is
considered to be equal to one of these valids based on numpy.isclose. The optional arguments
rtol and atol are passed along to isclose. If null is None, then no replacement is performed.
]
if call[name[sps].issparse, parameter[name[x]]] begin[:]
variable[x] assign[=] call[name[x].toarray, parameter[]]
if compare[name[rtol] is constant[None]] begin[:]
variable[rtol] assign[=] name[default_rtol]
if compare[name[atol] is constant[None]] begin[:]
variable[atol] assign[=] name[default_atol]
<ast.Try object at 0x7da1b26af670>
variable[x] assign[=] binary_operation[call[name[np].mod, parameter[binary_operation[name[x] + name[hpi]], name[tau]]] - name[hpi]]
variable[ii] assign[=] <ast.IfExp object at 0x7da18eb55de0>
variable[jj] assign[=] <ast.IfExp object at 0x7da1b0e39480>
variable[x] assign[=] call[name[np].tan, parameter[name[x]]]
if name[ii] begin[:]
call[name[x]][name[ii]] assign[=] constant[1]
if name[jj] begin[:]
call[name[x]][name[jj]] assign[=] constant[1]
variable[x] assign[=] binary_operation[constant[1.0] / name[x]]
if name[ii] begin[:]
call[name[x]][name[ii]] assign[=] name[nln]
if name[jj] begin[:]
call[name[x]][name[jj]] assign[=] name[nlp]
return[name[x]] | keyword[def] identifier[cotangent] ( identifier[x] , identifier[null] =(- identifier[np] . identifier[inf] , identifier[np] . identifier[inf] ), identifier[rtol] = identifier[default_rtol] , identifier[atol] = identifier[default_atol] ):
literal[string]
keyword[if] identifier[sps] . identifier[issparse] ( identifier[x] ): identifier[x] = identifier[x] . identifier[toarray] ()
keyword[else] : identifier[x] = identifier[np] . identifier[asarray] ( identifier[x] )
keyword[if] identifier[rtol] keyword[is] keyword[None] : identifier[rtol] = identifier[default_rtol]
keyword[if] identifier[atol] keyword[is] keyword[None] : identifier[atol] = identifier[default_atol]
keyword[try] :( identifier[nln] , identifier[nlp] )= identifier[null]
keyword[except] identifier[Exception] :( identifier[nln] , identifier[nlp] )=( identifier[null] , identifier[null] )
identifier[x] = identifier[np] . identifier[mod] ( identifier[x] + identifier[hpi] , identifier[tau] )- identifier[hpi]
identifier[ii] = keyword[None] keyword[if] identifier[nln] keyword[is] keyword[None] keyword[else] identifier[np] . identifier[where] ( identifier[np] . identifier[isclose] ( identifier[x] , literal[int] , identifier[rtol] = identifier[rtol] , identifier[atol] = identifier[atol] ))
identifier[jj] = keyword[None] keyword[if] identifier[nlp] keyword[is] keyword[None] keyword[else] identifier[np] . identifier[where] ( identifier[np] . identifier[isclose] ( identifier[x] , identifier[pi] , identifier[rtol] = identifier[rtol] , identifier[atol] = identifier[atol] ))
identifier[x] = identifier[np] . identifier[tan] ( identifier[x] )
keyword[if] identifier[ii] : identifier[x] [ identifier[ii] ]= literal[int]
keyword[if] identifier[jj] : identifier[x] [ identifier[jj] ]= literal[int]
identifier[x] = literal[int] / identifier[x]
keyword[if] identifier[ii] : identifier[x] [ identifier[ii] ]= identifier[nln]
keyword[if] identifier[jj] : identifier[x] [ identifier[jj] ]= identifier[nlp]
keyword[return] identifier[x] | def cotangent(x, null=(-np.inf, np.inf), rtol=default_rtol, atol=default_atol):
"""
cotangent(x) is equivalent to cot(x) except that it also works on sparse arrays.
The optional argument null (default, (-numpy.inf, numpy.inf)) may be specified to indicate what
value(s) should be assigned when x == 0 or pi. If only one number is given, then it is used for
both values; otherwise the first value corresponds to 0 and the second to pi. A value of x is
considered to be equal to one of these valids based on numpy.isclose. The optional arguments
rtol and atol are passed along to isclose. If null is None, then no replacement is performed.
"""
if sps.issparse(x):
x = x.toarray() # depends on [control=['if'], data=[]]
else:
x = np.asarray(x)
if rtol is None:
rtol = default_rtol # depends on [control=['if'], data=['rtol']]
if atol is None:
atol = default_atol # depends on [control=['if'], data=['atol']]
try:
(nln, nlp) = null # depends on [control=['try'], data=[]]
except Exception:
(nln, nlp) = (null, null) # depends on [control=['except'], data=[]]
x = np.mod(x + hpi, tau) - hpi
ii = None if nln is None else np.where(np.isclose(x, 0, rtol=rtol, atol=atol))
jj = None if nlp is None else np.where(np.isclose(x, pi, rtol=rtol, atol=atol))
x = np.tan(x)
if ii:
x[ii] = 1 # depends on [control=['if'], data=[]]
if jj:
x[jj] = 1 # depends on [control=['if'], data=[]]
x = 1.0 / x
if ii:
x[ii] = nln # depends on [control=['if'], data=[]]
if jj:
x[jj] = nlp # depends on [control=['if'], data=[]]
return x |
def create_or_login(resp):
"""This is called when login with OpenID succeeded and it's not
necessary to figure out if this is the users's first login or not.
This function has to redirect otherwise the user will be presented
with a terrible URL which we certainly don't want.
"""
session['openid'] = resp.identity_url
user = User.get_collection().find_one({'openid':resp.identity_url})
if user is not None:
flash(u'Successfully signed in')
g.user = user
return redirect(oid.get_next_url())
return redirect(url_for('create_profile', next=oid.get_next_url(),
name=resp.fullname or resp.nickname,
email=resp.email)) | def function[create_or_login, parameter[resp]]:
constant[This is called when login with OpenID succeeded and it's not
necessary to figure out if this is the users's first login or not.
This function has to redirect otherwise the user will be presented
with a terrible URL which we certainly don't want.
]
call[name[session]][constant[openid]] assign[=] name[resp].identity_url
variable[user] assign[=] call[call[name[User].get_collection, parameter[]].find_one, parameter[dictionary[[<ast.Constant object at 0x7da2041daa40>], [<ast.Attribute object at 0x7da2041dbd00>]]]]
if compare[name[user] is_not constant[None]] begin[:]
call[name[flash], parameter[constant[Successfully signed in]]]
name[g].user assign[=] name[user]
return[call[name[redirect], parameter[call[name[oid].get_next_url, parameter[]]]]]
return[call[name[redirect], parameter[call[name[url_for], parameter[constant[create_profile]]]]]] | keyword[def] identifier[create_or_login] ( identifier[resp] ):
literal[string]
identifier[session] [ literal[string] ]= identifier[resp] . identifier[identity_url]
identifier[user] = identifier[User] . identifier[get_collection] (). identifier[find_one] ({ literal[string] : identifier[resp] . identifier[identity_url] })
keyword[if] identifier[user] keyword[is] keyword[not] keyword[None] :
identifier[flash] ( literal[string] )
identifier[g] . identifier[user] = identifier[user]
keyword[return] identifier[redirect] ( identifier[oid] . identifier[get_next_url] ())
keyword[return] identifier[redirect] ( identifier[url_for] ( literal[string] , identifier[next] = identifier[oid] . identifier[get_next_url] (),
identifier[name] = identifier[resp] . identifier[fullname] keyword[or] identifier[resp] . identifier[nickname] ,
identifier[email] = identifier[resp] . identifier[email] )) | def create_or_login(resp):
"""This is called when login with OpenID succeeded and it's not
necessary to figure out if this is the users's first login or not.
This function has to redirect otherwise the user will be presented
with a terrible URL which we certainly don't want.
"""
session['openid'] = resp.identity_url
user = User.get_collection().find_one({'openid': resp.identity_url})
if user is not None:
flash(u'Successfully signed in')
g.user = user
return redirect(oid.get_next_url()) # depends on [control=['if'], data=['user']]
return redirect(url_for('create_profile', next=oid.get_next_url(), name=resp.fullname or resp.nickname, email=resp.email)) |
def get_instance_id():
"""Gets the instance ID of this EC2 instance
:return: String instance ID or None
"""
log = logging.getLogger(mod_logger + '.get_instance_id')
# Exit if not running on AWS
if not is_aws():
log.info('This machine is not running in AWS, exiting...')
return
instance_id_url = metadata_url + 'instance-id'
try:
response = urllib.urlopen(instance_id_url)
except(IOError, OSError) as ex:
msg = 'Unable to query URL to get instance ID: {u}\n{e}'. \
format(u=instance_id_url, e=ex)
log.error(msg)
return
# Check the code
if response.getcode() != 200:
msg = 'There was a problem querying url: {u}, returned code: {c}, unable to get the instance-id'.format(
u=instance_id_url, c=response.getcode())
log.error(msg)
return
instance_id = response.read()
return instance_id | def function[get_instance_id, parameter[]]:
constant[Gets the instance ID of this EC2 instance
:return: String instance ID or None
]
variable[log] assign[=] call[name[logging].getLogger, parameter[binary_operation[name[mod_logger] + constant[.get_instance_id]]]]
if <ast.UnaryOp object at 0x7da1b10c3760> begin[:]
call[name[log].info, parameter[constant[This machine is not running in AWS, exiting...]]]
return[None]
variable[instance_id_url] assign[=] binary_operation[name[metadata_url] + constant[instance-id]]
<ast.Try object at 0x7da1b10c36d0>
if compare[call[name[response].getcode, parameter[]] not_equal[!=] constant[200]] begin[:]
variable[msg] assign[=] call[constant[There was a problem querying url: {u}, returned code: {c}, unable to get the instance-id].format, parameter[]]
call[name[log].error, parameter[name[msg]]]
return[None]
variable[instance_id] assign[=] call[name[response].read, parameter[]]
return[name[instance_id]] | keyword[def] identifier[get_instance_id] ():
literal[string]
identifier[log] = identifier[logging] . identifier[getLogger] ( identifier[mod_logger] + literal[string] )
keyword[if] keyword[not] identifier[is_aws] ():
identifier[log] . identifier[info] ( literal[string] )
keyword[return]
identifier[instance_id_url] = identifier[metadata_url] + literal[string]
keyword[try] :
identifier[response] = identifier[urllib] . identifier[urlopen] ( identifier[instance_id_url] )
keyword[except] ( identifier[IOError] , identifier[OSError] ) keyword[as] identifier[ex] :
identifier[msg] = literal[string] . identifier[format] ( identifier[u] = identifier[instance_id_url] , identifier[e] = identifier[ex] )
identifier[log] . identifier[error] ( identifier[msg] )
keyword[return]
keyword[if] identifier[response] . identifier[getcode] ()!= literal[int] :
identifier[msg] = literal[string] . identifier[format] (
identifier[u] = identifier[instance_id_url] , identifier[c] = identifier[response] . identifier[getcode] ())
identifier[log] . identifier[error] ( identifier[msg] )
keyword[return]
identifier[instance_id] = identifier[response] . identifier[read] ()
keyword[return] identifier[instance_id] | def get_instance_id():
"""Gets the instance ID of this EC2 instance
:return: String instance ID or None
"""
log = logging.getLogger(mod_logger + '.get_instance_id')
# Exit if not running on AWS
if not is_aws():
log.info('This machine is not running in AWS, exiting...')
return # depends on [control=['if'], data=[]]
instance_id_url = metadata_url + 'instance-id'
try:
response = urllib.urlopen(instance_id_url) # depends on [control=['try'], data=[]]
except (IOError, OSError) as ex:
msg = 'Unable to query URL to get instance ID: {u}\n{e}'.format(u=instance_id_url, e=ex)
log.error(msg)
return # depends on [control=['except'], data=['ex']]
# Check the code
if response.getcode() != 200:
msg = 'There was a problem querying url: {u}, returned code: {c}, unable to get the instance-id'.format(u=instance_id_url, c=response.getcode())
log.error(msg)
return # depends on [control=['if'], data=[]]
instance_id = response.read()
return instance_id |
def upload(target):
# type: (str) -> None
""" Upload the release to a pypi server.
TODO: Make sure the git directory is clean before allowing a release.
Args:
target (str):
pypi target as defined in ~/.pypirc
"""
log.info("Uploading to pypi server <33>{}".format(target))
with conf.within_proj_dir():
shell.run('python setup.py sdist register -r "{}"'.format(target))
shell.run('python setup.py sdist upload -r "{}"'.format(target)) | def function[upload, parameter[target]]:
constant[ Upload the release to a pypi server.
TODO: Make sure the git directory is clean before allowing a release.
Args:
target (str):
pypi target as defined in ~/.pypirc
]
call[name[log].info, parameter[call[constant[Uploading to pypi server <33>{}].format, parameter[name[target]]]]]
with call[name[conf].within_proj_dir, parameter[]] begin[:]
call[name[shell].run, parameter[call[constant[python setup.py sdist register -r "{}"].format, parameter[name[target]]]]]
call[name[shell].run, parameter[call[constant[python setup.py sdist upload -r "{}"].format, parameter[name[target]]]]] | keyword[def] identifier[upload] ( identifier[target] ):
literal[string]
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[target] ))
keyword[with] identifier[conf] . identifier[within_proj_dir] ():
identifier[shell] . identifier[run] ( literal[string] . identifier[format] ( identifier[target] ))
identifier[shell] . identifier[run] ( literal[string] . identifier[format] ( identifier[target] )) | def upload(target):
# type: (str) -> None
' Upload the release to a pypi server.\n\n TODO: Make sure the git directory is clean before allowing a release.\n\n Args:\n target (str):\n pypi target as defined in ~/.pypirc\n '
log.info('Uploading to pypi server <33>{}'.format(target))
with conf.within_proj_dir():
shell.run('python setup.py sdist register -r "{}"'.format(target))
shell.run('python setup.py sdist upload -r "{}"'.format(target)) # depends on [control=['with'], data=[]] |
def xd(self):
"""get xarray dataset file handle to LSM files"""
if self._xd is None:
# download files if the user requests
if None not in (self.download_start_datetime, self.download_end_datetime):
self._download()
self._xd = super(ERAtoGSSHA, self).xd
self._xd.lsm.lon_to_180 = True
return self._xd | def function[xd, parameter[self]]:
constant[get xarray dataset file handle to LSM files]
if compare[name[self]._xd is constant[None]] begin[:]
if compare[constant[None] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Attribute object at 0x7da20c6a9f90>, <ast.Attribute object at 0x7da20c6ab100>]]] begin[:]
call[name[self]._download, parameter[]]
name[self]._xd assign[=] call[name[super], parameter[name[ERAtoGSSHA], name[self]]].xd
name[self]._xd.lsm.lon_to_180 assign[=] constant[True]
return[name[self]._xd] | keyword[def] identifier[xd] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_xd] keyword[is] keyword[None] :
keyword[if] keyword[None] keyword[not] keyword[in] ( identifier[self] . identifier[download_start_datetime] , identifier[self] . identifier[download_end_datetime] ):
identifier[self] . identifier[_download] ()
identifier[self] . identifier[_xd] = identifier[super] ( identifier[ERAtoGSSHA] , identifier[self] ). identifier[xd]
identifier[self] . identifier[_xd] . identifier[lsm] . identifier[lon_to_180] = keyword[True]
keyword[return] identifier[self] . identifier[_xd] | def xd(self):
"""get xarray dataset file handle to LSM files"""
if self._xd is None:
# download files if the user requests
if None not in (self.download_start_datetime, self.download_end_datetime):
self._download() # depends on [control=['if'], data=[]]
self._xd = super(ERAtoGSSHA, self).xd
self._xd.lsm.lon_to_180 = True # depends on [control=['if'], data=[]]
return self._xd |
def fraction_correct_fuzzy_linear_create_vector(z, z_cutoff, z_fuzzy_range):
'''A helper function for fraction_correct_fuzzy_linear.'''
assert(z_fuzzy_range * 2 < z_cutoff)
if (z == None or numpy.isnan(z)): # todo: and ignore_null_values: # If we are missing values then we either discount the case or consider it as incorrect depending on ignore_null_values
return None
elif (z >= z_cutoff + z_fuzzy_range): # positive e.g. z >= 1.1
return [0, 0, 1]
elif (z <= -z_cutoff - z_fuzzy_range): # negative e.g. z <= -1.1
return [1, 0, 0]
elif (-z_cutoff + z_fuzzy_range <= z <= z_cutoff - z_fuzzy_range): # neutral e.g. -0.9 <= z <= 0.9
return [0, 1, 0]
elif (-z_cutoff - z_fuzzy_range < z < -z_cutoff + z_fuzzy_range): # negative/neutral e.g. -1.1 < z < 0.9
neutrality = (z + z_cutoff + z_fuzzy_range) / (z_fuzzy_range * 2)
zvec = [1 - neutrality, neutrality, 0]
elif (z_cutoff - z_fuzzy_range < z < z_cutoff + z_fuzzy_range): # neutral/positive e.g. 0.9 < z < 1.1
positivity = (z - z_cutoff + z_fuzzy_range) / (z_fuzzy_range * 2)
zvec = [0, 1 - positivity, positivity]
else:
raise Exception('Logical error.')
# normalize the vector
length = math.sqrt(numpy.dot(zvec, zvec))
return numpy.divide(zvec, length) | def function[fraction_correct_fuzzy_linear_create_vector, parameter[z, z_cutoff, z_fuzzy_range]]:
constant[A helper function for fraction_correct_fuzzy_linear.]
assert[compare[binary_operation[name[z_fuzzy_range] * constant[2]] less[<] name[z_cutoff]]]
if <ast.BoolOp object at 0x7da1b23d5540> begin[:]
return[constant[None]]
variable[length] assign[=] call[name[math].sqrt, parameter[call[name[numpy].dot, parameter[name[zvec], name[zvec]]]]]
return[call[name[numpy].divide, parameter[name[zvec], name[length]]]] | keyword[def] identifier[fraction_correct_fuzzy_linear_create_vector] ( identifier[z] , identifier[z_cutoff] , identifier[z_fuzzy_range] ):
literal[string]
keyword[assert] ( identifier[z_fuzzy_range] * literal[int] < identifier[z_cutoff] )
keyword[if] ( identifier[z] == keyword[None] keyword[or] identifier[numpy] . identifier[isnan] ( identifier[z] )):
keyword[return] keyword[None]
keyword[elif] ( identifier[z] >= identifier[z_cutoff] + identifier[z_fuzzy_range] ):
keyword[return] [ literal[int] , literal[int] , literal[int] ]
keyword[elif] ( identifier[z] <=- identifier[z_cutoff] - identifier[z_fuzzy_range] ):
keyword[return] [ literal[int] , literal[int] , literal[int] ]
keyword[elif] (- identifier[z_cutoff] + identifier[z_fuzzy_range] <= identifier[z] <= identifier[z_cutoff] - identifier[z_fuzzy_range] ):
keyword[return] [ literal[int] , literal[int] , literal[int] ]
keyword[elif] (- identifier[z_cutoff] - identifier[z_fuzzy_range] < identifier[z] <- identifier[z_cutoff] + identifier[z_fuzzy_range] ):
identifier[neutrality] =( identifier[z] + identifier[z_cutoff] + identifier[z_fuzzy_range] )/( identifier[z_fuzzy_range] * literal[int] )
identifier[zvec] =[ literal[int] - identifier[neutrality] , identifier[neutrality] , literal[int] ]
keyword[elif] ( identifier[z_cutoff] - identifier[z_fuzzy_range] < identifier[z] < identifier[z_cutoff] + identifier[z_fuzzy_range] ):
identifier[positivity] =( identifier[z] - identifier[z_cutoff] + identifier[z_fuzzy_range] )/( identifier[z_fuzzy_range] * literal[int] )
identifier[zvec] =[ literal[int] , literal[int] - identifier[positivity] , identifier[positivity] ]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[length] = identifier[math] . identifier[sqrt] ( identifier[numpy] . identifier[dot] ( identifier[zvec] , identifier[zvec] ))
keyword[return] identifier[numpy] . identifier[divide] ( identifier[zvec] , identifier[length] ) | def fraction_correct_fuzzy_linear_create_vector(z, z_cutoff, z_fuzzy_range):
"""A helper function for fraction_correct_fuzzy_linear."""
assert z_fuzzy_range * 2 < z_cutoff
if z == None or numpy.isnan(z): # todo: and ignore_null_values: # If we are missing values then we either discount the case or consider it as incorrect depending on ignore_null_values
return None # depends on [control=['if'], data=[]]
elif z >= z_cutoff + z_fuzzy_range: # positive e.g. z >= 1.1
return [0, 0, 1] # depends on [control=['if'], data=[]]
elif z <= -z_cutoff - z_fuzzy_range: # negative e.g. z <= -1.1
return [1, 0, 0] # depends on [control=['if'], data=[]]
elif -z_cutoff + z_fuzzy_range <= z <= z_cutoff - z_fuzzy_range: # neutral e.g. -0.9 <= z <= 0.9
return [0, 1, 0] # depends on [control=['if'], data=[]]
elif -z_cutoff - z_fuzzy_range < z < -z_cutoff + z_fuzzy_range: # negative/neutral e.g. -1.1 < z < 0.9
neutrality = (z + z_cutoff + z_fuzzy_range) / (z_fuzzy_range * 2)
zvec = [1 - neutrality, neutrality, 0] # depends on [control=['if'], data=['z']]
elif z_cutoff - z_fuzzy_range < z < z_cutoff + z_fuzzy_range: # neutral/positive e.g. 0.9 < z < 1.1
positivity = (z - z_cutoff + z_fuzzy_range) / (z_fuzzy_range * 2)
zvec = [0, 1 - positivity, positivity] # depends on [control=['if'], data=['z']]
else:
raise Exception('Logical error.')
# normalize the vector
length = math.sqrt(numpy.dot(zvec, zvec))
return numpy.divide(zvec, length) |
def provider_id_slot(self, other):
"""
Args:
other (ConfigProvider): Provider to examine
Returns:
(int | None): Index of existing provider with same id, if any
"""
if other:
pid = other.provider_id()
for i, provider in enumerate(self.providers):
if provider.provider_id() == pid:
return i
return None | def function[provider_id_slot, parameter[self, other]]:
constant[
Args:
other (ConfigProvider): Provider to examine
Returns:
(int | None): Index of existing provider with same id, if any
]
if name[other] begin[:]
variable[pid] assign[=] call[name[other].provider_id, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b23eceb0>, <ast.Name object at 0x7da1b23eeb00>]]] in starred[call[name[enumerate], parameter[name[self].providers]]] begin[:]
if compare[call[name[provider].provider_id, parameter[]] equal[==] name[pid]] begin[:]
return[name[i]]
return[constant[None]] | keyword[def] identifier[provider_id_slot] ( identifier[self] , identifier[other] ):
literal[string]
keyword[if] identifier[other] :
identifier[pid] = identifier[other] . identifier[provider_id] ()
keyword[for] identifier[i] , identifier[provider] keyword[in] identifier[enumerate] ( identifier[self] . identifier[providers] ):
keyword[if] identifier[provider] . identifier[provider_id] ()== identifier[pid] :
keyword[return] identifier[i]
keyword[return] keyword[None] | def provider_id_slot(self, other):
"""
Args:
other (ConfigProvider): Provider to examine
Returns:
(int | None): Index of existing provider with same id, if any
"""
if other:
pid = other.provider_id()
for (i, provider) in enumerate(self.providers):
if provider.provider_id() == pid:
return i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return None |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.