code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def write_table(self, d):
"""
Write out a Python dictionary made of up string keys, and values
that are strings, signed integers, Decimal, datetime.datetime, or
sub-dictionaries following the same constraints.
"""
# HACK: encoding of AMQP tables is broken because it requires the
# length of the /encoded/ data instead of the number of items. To
# support streaming, fiddle with cursor position, rewinding to write
# the real length of the data. Generally speaking, I'm not a fan of
# the AMQP encoding scheme, it could be much faster.
table_len_pos = len(self._output_buffer)
self.write_long(0)
table_data_pos = len(self._output_buffer)
for key, value in d.iteritems():
self._write_item(key, value)
table_end_pos = len(self._output_buffer)
table_len = table_end_pos - table_data_pos
self.write_long_at(table_len, table_len_pos)
return self
|
def function[write_table, parameter[self, d]]:
constant[
Write out a Python dictionary made of up string keys, and values
that are strings, signed integers, Decimal, datetime.datetime, or
sub-dictionaries following the same constraints.
]
variable[table_len_pos] assign[=] call[name[len], parameter[name[self]._output_buffer]]
call[name[self].write_long, parameter[constant[0]]]
variable[table_data_pos] assign[=] call[name[len], parameter[name[self]._output_buffer]]
for taget[tuple[[<ast.Name object at 0x7da18f09eb60>, <ast.Name object at 0x7da18f09dc30>]]] in starred[call[name[d].iteritems, parameter[]]] begin[:]
call[name[self]._write_item, parameter[name[key], name[value]]]
variable[table_end_pos] assign[=] call[name[len], parameter[name[self]._output_buffer]]
variable[table_len] assign[=] binary_operation[name[table_end_pos] - name[table_data_pos]]
call[name[self].write_long_at, parameter[name[table_len], name[table_len_pos]]]
return[name[self]]
|
keyword[def] identifier[write_table] ( identifier[self] , identifier[d] ):
literal[string]
identifier[table_len_pos] = identifier[len] ( identifier[self] . identifier[_output_buffer] )
identifier[self] . identifier[write_long] ( literal[int] )
identifier[table_data_pos] = identifier[len] ( identifier[self] . identifier[_output_buffer] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[d] . identifier[iteritems] ():
identifier[self] . identifier[_write_item] ( identifier[key] , identifier[value] )
identifier[table_end_pos] = identifier[len] ( identifier[self] . identifier[_output_buffer] )
identifier[table_len] = identifier[table_end_pos] - identifier[table_data_pos]
identifier[self] . identifier[write_long_at] ( identifier[table_len] , identifier[table_len_pos] )
keyword[return] identifier[self]
|
def write_table(self, d):
"""
Write out a Python dictionary made of up string keys, and values
that are strings, signed integers, Decimal, datetime.datetime, or
sub-dictionaries following the same constraints.
"""
# HACK: encoding of AMQP tables is broken because it requires the
# length of the /encoded/ data instead of the number of items. To
# support streaming, fiddle with cursor position, rewinding to write
# the real length of the data. Generally speaking, I'm not a fan of
# the AMQP encoding scheme, it could be much faster.
table_len_pos = len(self._output_buffer)
self.write_long(0)
table_data_pos = len(self._output_buffer)
for (key, value) in d.iteritems():
self._write_item(key, value) # depends on [control=['for'], data=[]]
table_end_pos = len(self._output_buffer)
table_len = table_end_pos - table_data_pos
self.write_long_at(table_len, table_len_pos)
return self
|
def set_tlsext_use_srtp(self, profiles):
    """
    Enable support for negotiating SRTP keying material.
    :param bytes profiles: A colon delimited list of protection profile
        names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``.
    :return: None
    """
    if not isinstance(profiles, bytes):
        raise TypeError("profiles must be a byte string.")
    # SSL_CTX_set_tlsext_use_srtp returns 0 on success, non-zero on error.
    result = _lib.SSL_CTX_set_tlsext_use_srtp(self._context, profiles)
    _openssl_assert(result == 0)
|
def function[set_tlsext_use_srtp, parameter[self, profiles]]:
constant[
Enable support for negotiating SRTP keying material.
:param bytes profiles: A colon delimited list of protection profile
names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``.
:return: None
]
if <ast.UnaryOp object at 0x7da1b020e680> begin[:]
<ast.Raise object at 0x7da1b0295a20>
call[name[_openssl_assert], parameter[compare[call[name[_lib].SSL_CTX_set_tlsext_use_srtp, parameter[name[self]._context, name[profiles]]] equal[==] constant[0]]]]
|
keyword[def] identifier[set_tlsext_use_srtp] ( identifier[self] , identifier[profiles] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[profiles] , identifier[bytes] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[_openssl_assert] (
identifier[_lib] . identifier[SSL_CTX_set_tlsext_use_srtp] ( identifier[self] . identifier[_context] , identifier[profiles] )== literal[int]
)
|
def set_tlsext_use_srtp(self, profiles):
"""
Enable support for negotiating SRTP keying material.
:param bytes profiles: A colon delimited list of protection profile
names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``.
:return: None
"""
if not isinstance(profiles, bytes):
raise TypeError('profiles must be a byte string.') # depends on [control=['if'], data=[]]
_openssl_assert(_lib.SSL_CTX_set_tlsext_use_srtp(self._context, profiles) == 0)
|
def publish(self, topic, message):
    """Publish a json message to a topic with a type and a sequence number
    The actual message will be published as a JSON object:
    {
    "sequence": <incrementing id>,
    "message": message
    }
    Args:
    topic (string): The MQTT topic to publish in
    message (string, dict): The message to publish
    """
    seq = self.sequencer.next_id(topic)
    packet = {
        'sequence': seq,
        'message': message
    }
    # Need to encode bytes types for json.dumps.  The same utf8 decode was
    # copy-pasted six times for six fields; drive it from one tuple instead.
    for field in ('key', 'payload', 'script', 'trace', 'report', 'received_time'):
        if field in packet['message']:
            packet['message'][field] = packet['message'][field].decode('utf8')
    serialized_packet = json.dumps(packet)
    try:
        # Limit how much we log in case the message is very long
        self._logger.debug("Publishing %s on topic %s", serialized_packet[:256], topic)
        self.client.publish(topic, serialized_packet, 1)
    except operationError as exc:
        # NOTE(review): assumes operationError exposes a .message attribute — confirm
        raise InternalError("Could not publish message", topic=topic, message=exc.message)
|
def function[publish, parameter[self, topic, message]]:
constant[Publish a json message to a topic with a type and a sequence number
The actual message will be published as a JSON object:
{
"sequence": <incrementing id>,
"message": message
}
Args:
topic (string): The MQTT topic to publish in
message (string, dict): The message to publish
]
variable[seq] assign[=] call[name[self].sequencer.next_id, parameter[name[topic]]]
variable[packet] assign[=] dictionary[[<ast.Constant object at 0x7da20e957400>, <ast.Constant object at 0x7da20e956500>], [<ast.Name object at 0x7da20e957460>, <ast.Name object at 0x7da20e9542b0>]]
if compare[constant[key] in call[name[packet]][constant[message]]] begin[:]
call[call[name[packet]][constant[message]]][constant[key]] assign[=] call[call[call[name[packet]][constant[message]]][constant[key]].decode, parameter[constant[utf8]]]
if compare[constant[payload] in call[name[packet]][constant[message]]] begin[:]
call[call[name[packet]][constant[message]]][constant[payload]] assign[=] call[call[call[name[packet]][constant[message]]][constant[payload]].decode, parameter[constant[utf8]]]
if compare[constant[script] in call[name[packet]][constant[message]]] begin[:]
call[call[name[packet]][constant[message]]][constant[script]] assign[=] call[call[call[name[packet]][constant[message]]][constant[script]].decode, parameter[constant[utf8]]]
if compare[constant[trace] in call[name[packet]][constant[message]]] begin[:]
call[call[name[packet]][constant[message]]][constant[trace]] assign[=] call[call[call[name[packet]][constant[message]]][constant[trace]].decode, parameter[constant[utf8]]]
if compare[constant[report] in call[name[packet]][constant[message]]] begin[:]
call[call[name[packet]][constant[message]]][constant[report]] assign[=] call[call[call[name[packet]][constant[message]]][constant[report]].decode, parameter[constant[utf8]]]
if compare[constant[received_time] in call[name[packet]][constant[message]]] begin[:]
call[call[name[packet]][constant[message]]][constant[received_time]] assign[=] call[call[call[name[packet]][constant[message]]][constant[received_time]].decode, parameter[constant[utf8]]]
variable[serialized_packet] assign[=] call[name[json].dumps, parameter[name[packet]]]
<ast.Try object at 0x7da20e955c30>
|
keyword[def] identifier[publish] ( identifier[self] , identifier[topic] , identifier[message] ):
literal[string]
identifier[seq] = identifier[self] . identifier[sequencer] . identifier[next_id] ( identifier[topic] )
identifier[packet] ={
literal[string] : identifier[seq] ,
literal[string] : identifier[message]
}
keyword[if] literal[string] keyword[in] identifier[packet] [ literal[string] ]:
identifier[packet] [ literal[string] ][ literal[string] ]= identifier[packet] [ literal[string] ][ literal[string] ]. identifier[decode] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[packet] [ literal[string] ]:
identifier[packet] [ literal[string] ][ literal[string] ]= identifier[packet] [ literal[string] ][ literal[string] ]. identifier[decode] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[packet] [ literal[string] ]:
identifier[packet] [ literal[string] ][ literal[string] ]= identifier[packet] [ literal[string] ][ literal[string] ]. identifier[decode] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[packet] [ literal[string] ]:
identifier[packet] [ literal[string] ][ literal[string] ]= identifier[packet] [ literal[string] ][ literal[string] ]. identifier[decode] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[packet] [ literal[string] ]:
identifier[packet] [ literal[string] ][ literal[string] ]= identifier[packet] [ literal[string] ][ literal[string] ]. identifier[decode] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[packet] [ literal[string] ]:
identifier[packet] [ literal[string] ][ literal[string] ]= identifier[packet] [ literal[string] ][ literal[string] ]. identifier[decode] ( literal[string] )
identifier[serialized_packet] = identifier[json] . identifier[dumps] ( identifier[packet] )
keyword[try] :
identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] , identifier[serialized_packet] [: literal[int] ], identifier[topic] )
identifier[self] . identifier[client] . identifier[publish] ( identifier[topic] , identifier[serialized_packet] , literal[int] )
keyword[except] identifier[operationError] keyword[as] identifier[exc] :
keyword[raise] identifier[InternalError] ( literal[string] , identifier[topic] = identifier[topic] , identifier[message] = identifier[exc] . identifier[message] )
|
def publish(self, topic, message):
"""Publish a json message to a topic with a type and a sequence number
The actual message will be published as a JSON object:
{
"sequence": <incrementing id>,
"message": message
}
Args:
topic (string): The MQTT topic to publish in
message (string, dict): The message to publish
"""
seq = self.sequencer.next_id(topic)
packet = {'sequence': seq, 'message': message}
# Need to encode bytes types for json.dumps
if 'key' in packet['message']:
packet['message']['key'] = packet['message']['key'].decode('utf8') # depends on [control=['if'], data=[]]
if 'payload' in packet['message']:
packet['message']['payload'] = packet['message']['payload'].decode('utf8') # depends on [control=['if'], data=[]]
if 'script' in packet['message']:
packet['message']['script'] = packet['message']['script'].decode('utf8') # depends on [control=['if'], data=[]]
if 'trace' in packet['message']:
packet['message']['trace'] = packet['message']['trace'].decode('utf8') # depends on [control=['if'], data=[]]
if 'report' in packet['message']:
packet['message']['report'] = packet['message']['report'].decode('utf8') # depends on [control=['if'], data=[]]
if 'received_time' in packet['message']:
packet['message']['received_time'] = packet['message']['received_time'].decode('utf8') # depends on [control=['if'], data=[]]
serialized_packet = json.dumps(packet)
try:
# Limit how much we log in case the message is very long
self._logger.debug('Publishing %s on topic %s', serialized_packet[:256], topic)
self.client.publish(topic, serialized_packet, 1) # depends on [control=['try'], data=[]]
except operationError as exc:
raise InternalError('Could not publish message', topic=topic, message=exc.message) # depends on [control=['except'], data=['exc']]
|
def _read_message(self):
""" Reads a single size-annotated message from the server """
size = int(self.buf.read_line().decode("utf-8"))
return self.buf.read(size).decode("utf-8")
|
def function[_read_message, parameter[self]]:
constant[ Reads a single size-annotated message from the server ]
variable[size] assign[=] call[name[int], parameter[call[call[name[self].buf.read_line, parameter[]].decode, parameter[constant[utf-8]]]]]
return[call[call[name[self].buf.read, parameter[name[size]]].decode, parameter[constant[utf-8]]]]
|
keyword[def] identifier[_read_message] ( identifier[self] ):
literal[string]
identifier[size] = identifier[int] ( identifier[self] . identifier[buf] . identifier[read_line] (). identifier[decode] ( literal[string] ))
keyword[return] identifier[self] . identifier[buf] . identifier[read] ( identifier[size] ). identifier[decode] ( literal[string] )
|
def _read_message(self):
""" Reads a single size-annotated message from the server """
size = int(self.buf.read_line().decode('utf-8'))
return self.buf.read(size).decode('utf-8')
|
def multiline_string_lines(source, include_docstrings=False):
    """Return line numbers that are within multiline strings.
    The line numbers are indexed at 1.
    Docstrings are ignored.
    """
    found = set()
    prev_type = ''
    try:
        for tok in generate_tokens(source):
            tok_type = tok[0]
            start_row = tok[2][0]
            end_row = tok[3][0]
            spans_lines = tok_type == tokenize.STRING and start_row != end_row
            # A string token directly after INDENT is a docstring; skip it
            # unless the caller asked for docstrings too.
            if spans_lines and (include_docstrings or
                                prev_type != tokenize.INDENT):
                # Offset by one so only the string's contents are reported.
                found.update(range(start_row + 1, end_row + 1))
            prev_type = tok_type
    except (SyntaxError, tokenize.TokenError):
        pass
    return found
|
def function[multiline_string_lines, parameter[source, include_docstrings]]:
constant[Return line numbers that are within multiline strings.
The line numbers are indexed at 1.
Docstrings are ignored.
]
variable[line_numbers] assign[=] call[name[set], parameter[]]
variable[previous_token_type] assign[=] constant[]
<ast.Try object at 0x7da2044c0e20>
return[name[line_numbers]]
|
keyword[def] identifier[multiline_string_lines] ( identifier[source] , identifier[include_docstrings] = keyword[False] ):
literal[string]
identifier[line_numbers] = identifier[set] ()
identifier[previous_token_type] = literal[string]
keyword[try] :
keyword[for] identifier[t] keyword[in] identifier[generate_tokens] ( identifier[source] ):
identifier[token_type] = identifier[t] [ literal[int] ]
identifier[start_row] = identifier[t] [ literal[int] ][ literal[int] ]
identifier[end_row] = identifier[t] [ literal[int] ][ literal[int] ]
keyword[if] identifier[token_type] == identifier[tokenize] . identifier[STRING] keyword[and] identifier[start_row] != identifier[end_row] :
keyword[if] (
identifier[include_docstrings] keyword[or]
identifier[previous_token_type] != identifier[tokenize] . identifier[INDENT]
):
identifier[line_numbers] |= identifier[set] ( identifier[range] ( literal[int] + identifier[start_row] , literal[int] + identifier[end_row] ))
identifier[previous_token_type] = identifier[token_type]
keyword[except] ( identifier[SyntaxError] , identifier[tokenize] . identifier[TokenError] ):
keyword[pass]
keyword[return] identifier[line_numbers]
|
def multiline_string_lines(source, include_docstrings=False):
"""Return line numbers that are within multiline strings.
The line numbers are indexed at 1.
Docstrings are ignored.
"""
line_numbers = set()
previous_token_type = ''
try:
for t in generate_tokens(source):
token_type = t[0]
start_row = t[2][0]
end_row = t[3][0]
if token_type == tokenize.STRING and start_row != end_row:
if include_docstrings or previous_token_type != tokenize.INDENT:
# We increment by one since we want the contents of the
# string.
line_numbers |= set(range(1 + start_row, 1 + end_row)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
previous_token_type = token_type # depends on [control=['for'], data=['t']] # depends on [control=['try'], data=[]]
except (SyntaxError, tokenize.TokenError):
pass # depends on [control=['except'], data=[]]
return line_numbers
|
def encode_dataset(dataset, vocabulary):
    """Encode from strings to token ids.
    Args:
    dataset: a tf.data.Dataset with string values.
    vocabulary: a mesh_tensorflow.transformer.Vocabulary
    Returns:
    a tf.data.Dataset with integer-vector values ending in EOS=1
    """
    def _encode(features):
        # Encode every feature value, keeping keys unchanged.
        encoded = {}
        for key, value in features.items():
            encoded[key] = vocabulary.encode_tf(value)
        return encoded
    return dataset.map(_encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
|
def function[encode_dataset, parameter[dataset, vocabulary]]:
constant[Encode from strings to token ids.
Args:
dataset: a tf.data.Dataset with string values.
vocabulary: a mesh_tensorflow.transformer.Vocabulary
Returns:
a tf.data.Dataset with integer-vector values ending in EOS=1
]
def function[encode, parameter[features]]:
return[<ast.DictComp object at 0x7da20c7cbc40>]
return[call[name[dataset].map, parameter[name[encode]]]]
|
keyword[def] identifier[encode_dataset] ( identifier[dataset] , identifier[vocabulary] ):
literal[string]
keyword[def] identifier[encode] ( identifier[features] ):
keyword[return] { identifier[k] : identifier[vocabulary] . identifier[encode_tf] ( identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[features] . identifier[items] ()}
keyword[return] identifier[dataset] . identifier[map] ( identifier[encode] , identifier[num_parallel_calls] = identifier[tf] . identifier[data] . identifier[experimental] . identifier[AUTOTUNE] )
|
def encode_dataset(dataset, vocabulary):
"""Encode from strings to token ids.
Args:
dataset: a tf.data.Dataset with string values.
vocabulary: a mesh_tensorflow.transformer.Vocabulary
Returns:
a tf.data.Dataset with integer-vector values ending in EOS=1
"""
def encode(features):
return {k: vocabulary.encode_tf(v) for (k, v) in features.items()}
return dataset.map(encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
|
def _next_non_ff_byte(self, start):
"""
Return an offset, byte 2-tuple for the next byte in *stream* that is
not '\xFF', starting with the byte at offset *start*. If the byte at
offset *start* is not '\xFF', *start* and the returned *offset* will
be the same.
"""
self._stream.seek(start)
byte_ = self._read_byte()
while byte_ == b'\xFF':
byte_ = self._read_byte()
offset_of_non_ff_byte = self._stream.tell() - 1
return offset_of_non_ff_byte, byte_
|
def function[_next_non_ff_byte, parameter[self, start]]:
constant[
Return an offset, byte 2-tuple for the next byte in *stream* that is
not 'ÿ', starting with the byte at offset *start*. If the byte at
offset *start* is not 'ÿ', *start* and the returned *offset* will
be the same.
]
call[name[self]._stream.seek, parameter[name[start]]]
variable[byte_] assign[=] call[name[self]._read_byte, parameter[]]
while compare[name[byte_] equal[==] constant[b'\xff']] begin[:]
variable[byte_] assign[=] call[name[self]._read_byte, parameter[]]
variable[offset_of_non_ff_byte] assign[=] binary_operation[call[name[self]._stream.tell, parameter[]] - constant[1]]
return[tuple[[<ast.Name object at 0x7da1b2189990>, <ast.Name object at 0x7da1b2188e80>]]]
|
keyword[def] identifier[_next_non_ff_byte] ( identifier[self] , identifier[start] ):
literal[string]
identifier[self] . identifier[_stream] . identifier[seek] ( identifier[start] )
identifier[byte_] = identifier[self] . identifier[_read_byte] ()
keyword[while] identifier[byte_] == literal[string] :
identifier[byte_] = identifier[self] . identifier[_read_byte] ()
identifier[offset_of_non_ff_byte] = identifier[self] . identifier[_stream] . identifier[tell] ()- literal[int]
keyword[return] identifier[offset_of_non_ff_byte] , identifier[byte_]
|
def _next_non_ff_byte(self, start):
"""
Return an offset, byte 2-tuple for the next byte in *stream* that is
not 'ÿ', starting with the byte at offset *start*. If the byte at
offset *start* is not 'ÿ', *start* and the returned *offset* will
be the same.
"""
self._stream.seek(start)
byte_ = self._read_byte()
while byte_ == b'\xff':
byte_ = self._read_byte() # depends on [control=['while'], data=['byte_']]
offset_of_non_ff_byte = self._stream.tell() - 1
return (offset_of_non_ff_byte, byte_)
|
def mask_circular_annular_from_shape_pixel_scale_and_radii(shape, pixel_scale, inner_radius_arcsec, outer_radius_arcsec,
                                                           centre=(0.0, 0.0)):
    """Compute an annular masks from an input inner and outer masks radius and regular shape."""
    mask = np.full(shape, True)
    centres_arcsec = mask_centres_from_shape_pixel_scale_and_centre(shape=mask.shape, pixel_scale=pixel_scale, centre=centre)
    rows, cols = mask.shape[0], mask.shape[1]
    for row in range(rows):
        # The y offset is constant along a row, so compute it once per row.
        y_arcsec = (row - centres_arcsec[0]) * pixel_scale
        for col in range(cols):
            x_arcsec = (col - centres_arcsec[1]) * pixel_scale
            radius_arcsec = np.sqrt(x_arcsec ** 2 + y_arcsec ** 2)
            # Unmask (False) pixels whose radius lies inside the annulus.
            if inner_radius_arcsec <= radius_arcsec <= outer_radius_arcsec:
                mask[row, col] = False
    return mask
|
def function[mask_circular_annular_from_shape_pixel_scale_and_radii, parameter[shape, pixel_scale, inner_radius_arcsec, outer_radius_arcsec, centre]]:
constant[Compute an annular masks from an input inner and outer masks radius and regular shape.]
variable[mask] assign[=] call[name[np].full, parameter[name[shape], constant[True]]]
variable[centres_arcsec] assign[=] call[name[mask_centres_from_shape_pixel_scale_and_centre], parameter[]]
for taget[name[y]] in starred[call[name[range], parameter[call[name[mask].shape][constant[0]]]]] begin[:]
for taget[name[x]] in starred[call[name[range], parameter[call[name[mask].shape][constant[1]]]]] begin[:]
variable[y_arcsec] assign[=] binary_operation[binary_operation[name[y] - call[name[centres_arcsec]][constant[0]]] * name[pixel_scale]]
variable[x_arcsec] assign[=] binary_operation[binary_operation[name[x] - call[name[centres_arcsec]][constant[1]]] * name[pixel_scale]]
variable[r_arcsec] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[name[x_arcsec] ** constant[2]] + binary_operation[name[y_arcsec] ** constant[2]]]]]
if compare[name[outer_radius_arcsec] greater_or_equal[>=] name[r_arcsec]] begin[:]
call[name[mask]][tuple[[<ast.Name object at 0x7da20c76dab0>, <ast.Name object at 0x7da20c76c4c0>]]] assign[=] constant[False]
return[name[mask]]
|
keyword[def] identifier[mask_circular_annular_from_shape_pixel_scale_and_radii] ( identifier[shape] , identifier[pixel_scale] , identifier[inner_radius_arcsec] , identifier[outer_radius_arcsec] ,
identifier[centre] =( literal[int] , literal[int] )):
literal[string]
identifier[mask] = identifier[np] . identifier[full] ( identifier[shape] , keyword[True] )
identifier[centres_arcsec] = identifier[mask_centres_from_shape_pixel_scale_and_centre] ( identifier[shape] = identifier[mask] . identifier[shape] , identifier[pixel_scale] = identifier[pixel_scale] , identifier[centre] = identifier[centre] )
keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[mask] . identifier[shape] [ literal[int] ]):
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[mask] . identifier[shape] [ literal[int] ]):
identifier[y_arcsec] =( identifier[y] - identifier[centres_arcsec] [ literal[int] ])* identifier[pixel_scale]
identifier[x_arcsec] =( identifier[x] - identifier[centres_arcsec] [ literal[int] ])* identifier[pixel_scale]
identifier[r_arcsec] = identifier[np] . identifier[sqrt] ( identifier[x_arcsec] ** literal[int] + identifier[y_arcsec] ** literal[int] )
keyword[if] identifier[outer_radius_arcsec] >= identifier[r_arcsec] >= identifier[inner_radius_arcsec] :
identifier[mask] [ identifier[y] , identifier[x] ]= keyword[False]
keyword[return] identifier[mask]
|
def mask_circular_annular_from_shape_pixel_scale_and_radii(shape, pixel_scale, inner_radius_arcsec, outer_radius_arcsec, centre=(0.0, 0.0)):
"""Compute an annular masks from an input inner and outer masks radius and regular shape."""
mask = np.full(shape, True)
centres_arcsec = mask_centres_from_shape_pixel_scale_and_centre(shape=mask.shape, pixel_scale=pixel_scale, centre=centre)
for y in range(mask.shape[0]):
for x in range(mask.shape[1]):
y_arcsec = (y - centres_arcsec[0]) * pixel_scale
x_arcsec = (x - centres_arcsec[1]) * pixel_scale
r_arcsec = np.sqrt(x_arcsec ** 2 + y_arcsec ** 2)
if outer_radius_arcsec >= r_arcsec >= inner_radius_arcsec:
mask[y, x] = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['y']]
return mask
|
def get_blocks(self, chrom, start, end):
    """
    Get any blocks in this alignment that overlap the given location.
    :return: the alignment blocks that overlap a given genomic interval;
    potentially none, in which case the empty list is returned.
    """
    # Unknown chromosome -> no blocks can possibly overlap.
    if chrom not in self.block_trees:
        return []
    tree = self.block_trees[chrom]
    return tree.intersectingInterval(start, end)
|
def function[get_blocks, parameter[self, chrom, start, end]]:
constant[
Get any blocks in this alignment that overlap the given location.
:return: the alignment blocks that overlap a given genomic interval;
potentially none, in which case the empty list is returned.
]
if compare[name[chrom] <ast.NotIn object at 0x7da2590d7190> name[self].block_trees] begin[:]
return[list[[]]]
return[call[call[name[self].block_trees][name[chrom]].intersectingInterval, parameter[name[start], name[end]]]]
|
keyword[def] identifier[get_blocks] ( identifier[self] , identifier[chrom] , identifier[start] , identifier[end] ):
literal[string]
keyword[if] identifier[chrom] keyword[not] keyword[in] identifier[self] . identifier[block_trees] :
keyword[return] []
keyword[return] identifier[self] . identifier[block_trees] [ identifier[chrom] ]. identifier[intersectingInterval] ( identifier[start] , identifier[end] )
|
def get_blocks(self, chrom, start, end):
"""
Get any blocks in this alignment that overlap the given location.
:return: the alignment blocks that overlap a given genomic interval;
potentially none, in which case the empty list is returned.
"""
if chrom not in self.block_trees:
return [] # depends on [control=['if'], data=[]]
return self.block_trees[chrom].intersectingInterval(start, end)
|
def _build(self, inputs):
    """Connects the SliceByDim module into the graph.
    Args:
    inputs: `Tensor` to slice. Its rank must be greater than the maximum
    dimension specified in `dims` (plus one as python is 0 indexed).
    Returns:
    The sliced tensor.
    Raises:
    ValueError: If `inputs` tensor has insufficient rank.
    """
    input_shape = inputs.get_shape().as_list()
    rank = len(input_shape)
    # The largest dimension referenced by `dims` must exist in the input.
    max_dim = np.max(self._dims) + 1
    if rank < max_dim:
        raise ValueError("Rank of inputs must be at least {}.".format(max_dim))
    # Defaults select everything: start at 0, take the full extent (-1).
    begin = [0] * rank
    size = [-1] * rank
    # Overwrite only the dimensions the user asked to slice.
    for dim, start, extent in zip(self._dims, self._begin, self._size):
        begin[dim] = start
        size[dim] = extent
    return tf.slice(inputs, begin=begin, size=size)
|
def function[_build, parameter[self, inputs]]:
constant[Connects the SliceByDim module into the graph.
Args:
inputs: `Tensor` to slice. Its rank must be greater than the maximum
dimension specified in `dims` (plus one as python is 0 indexed).
Returns:
The sliced tensor.
Raises:
ValueError: If `inputs` tensor has insufficient rank.
]
variable[shape_inputs] assign[=] call[call[name[inputs].get_shape, parameter[]].as_list, parameter[]]
variable[rank] assign[=] call[name[len], parameter[name[shape_inputs]]]
variable[max_dim] assign[=] binary_operation[call[name[np].max, parameter[name[self]._dims]] + constant[1]]
if compare[name[rank] less[<] name[max_dim]] begin[:]
<ast.Raise object at 0x7da1b1c79f00>
variable[full_begin] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b1c7a050>]] * name[rank]]
variable[full_size] assign[=] binary_operation[list[[<ast.UnaryOp object at 0x7da1b1f0b7f0>]] * name[rank]]
for taget[tuple[[<ast.Name object at 0x7da1b1f0bdc0>, <ast.Name object at 0x7da1b2127760>, <ast.Name object at 0x7da1b21276a0>]]] in starred[call[name[zip], parameter[name[self]._dims, name[self]._begin, name[self]._size]]] begin[:]
call[name[full_begin]][name[dim]] assign[=] name[begin]
call[name[full_size]][name[dim]] assign[=] name[size]
return[call[name[tf].slice, parameter[name[inputs]]]]
|
keyword[def] identifier[_build] ( identifier[self] , identifier[inputs] ):
literal[string]
identifier[shape_inputs] = identifier[inputs] . identifier[get_shape] (). identifier[as_list] ()
identifier[rank] = identifier[len] ( identifier[shape_inputs] )
identifier[max_dim] = identifier[np] . identifier[max] ( identifier[self] . identifier[_dims] )+ literal[int]
keyword[if] identifier[rank] < identifier[max_dim] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[max_dim] ))
identifier[full_begin] =[ literal[int] ]* identifier[rank]
identifier[full_size] =[- literal[int] ]* identifier[rank]
keyword[for] identifier[dim] , identifier[begin] , identifier[size] keyword[in] identifier[zip] ( identifier[self] . identifier[_dims] , identifier[self] . identifier[_begin] , identifier[self] . identifier[_size] ):
identifier[full_begin] [ identifier[dim] ]= identifier[begin]
identifier[full_size] [ identifier[dim] ]= identifier[size]
keyword[return] identifier[tf] . identifier[slice] ( identifier[inputs] , identifier[begin] = identifier[full_begin] , identifier[size] = identifier[full_size] )
|
def _build(self, inputs):
"""Connects the SliceByDim module into the graph.
Args:
inputs: `Tensor` to slice. Its rank must be greater than the maximum
dimension specified in `dims` (plus one as python is 0 indexed).
Returns:
The sliced tensor.
Raises:
ValueError: If `inputs` tensor has insufficient rank.
"""
shape_inputs = inputs.get_shape().as_list()
rank = len(shape_inputs)
# Checks that the rank of the tensor.
max_dim = np.max(self._dims) + 1
if rank < max_dim:
raise ValueError('Rank of inputs must be at least {}.'.format(max_dim)) # depends on [control=['if'], data=['max_dim']]
# Builds default lists for begin and size to pass to `tf.slice`.
full_begin = [0] * rank
full_size = [-1] * rank
# Updates lists with what the user provided.
for (dim, begin, size) in zip(self._dims, self._begin, self._size):
full_begin[dim] = begin
full_size[dim] = size # depends on [control=['for'], data=[]]
return tf.slice(inputs, begin=full_begin, size=full_size)
|
def is_same_as(self, other):
    """Asserts that the val is identical to other, via 'is' compare."""
    # Identity, not equality: two equal-but-distinct objects still fail.
    if not (self.val is other):
        self._err('Expected <%s> to be identical to <%s>, but was not.' % (self.val, other))
    return self
|
def function[is_same_as, parameter[self, other]]:
constant[Asserts that the val is identical to other, via 'is' compare.]
if compare[name[self].val is_not name[other]] begin[:]
call[name[self]._err, parameter[binary_operation[constant[Expected <%s> to be identical to <%s>, but was not.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c9910f0>, <ast.Name object at 0x7da20c990340>]]]]]
return[name[self]]
|
keyword[def] identifier[is_same_as] ( identifier[self] , identifier[other] ):
literal[string]
keyword[if] identifier[self] . identifier[val] keyword[is] keyword[not] identifier[other] :
identifier[self] . identifier[_err] ( literal[string] %( identifier[self] . identifier[val] , identifier[other] ))
keyword[return] identifier[self]
|
def is_same_as(self, other):
"""Asserts that the val is identical to other, via 'is' compare."""
if self.val is not other:
self._err('Expected <%s> to be identical to <%s>, but was not.' % (self.val, other)) # depends on [control=['if'], data=['other']]
return self
|
def tag(self, tag, child='', enclose=0, newline=True, **kwargs):
"""
enclose:
0 => <tag>
1 => <tag/>
2 => <tag></tag>
"""
kw = kwargs.copy()
_class = ''
if '_class' in kw:
_class = kw.pop('_class')
if 'class' in kw:
_class += ' ' + kw.pop('class')
tag_class = self.tag_class.get(tag, '')
if tag_class:
#if tag_class definition starts with '+', and combine it with original value
if tag_class.startswith('+'):
kw['class'] = tag_class[1:] + ' ' + _class.lstrip()
else:
kw['class'] = tag_class.lstrip()
else:
kw['class'] = _class.lstrip()
#process inner and outter link
if tag == 'a':
print ('------', kw)
href = kw.get('href', '#')
if href and (href.startswith('http:') or href.startswith('https:') or href.startswith('ftp:')):
_cls = 'outter'
else:
_cls = 'inner'
kw['href'] = href
if kw.get('class'):
kw['class'] = kw['class'] + ' ' + _cls
else:
kw['class'] = _cls
attrs = ' '.join(['%s="%s"' % (x, y) for x, y in sorted(kw.items()) if y])
if attrs:
attrs = ' ' + attrs
nline = '\n' if newline else ''
if child:
enclose = 2
if enclose == 1:
return '<%s%s/>%s' % (tag, attrs, nline)
elif enclose == 2:
return '<%s%s>%s</%s>%s' % (tag, attrs, child, tag, nline)
else:
return '<%s%s>%s' % (tag, attrs, nline)
|
def function[tag, parameter[self, tag, child, enclose, newline]]:
constant[
enclose:
0 => <tag>
1 => <tag/>
2 => <tag></tag>
]
variable[kw] assign[=] call[name[kwargs].copy, parameter[]]
variable[_class] assign[=] constant[]
if compare[constant[_class] in name[kw]] begin[:]
variable[_class] assign[=] call[name[kw].pop, parameter[constant[_class]]]
if compare[constant[class] in name[kw]] begin[:]
<ast.AugAssign object at 0x7da204344910>
variable[tag_class] assign[=] call[name[self].tag_class.get, parameter[name[tag], constant[]]]
if name[tag_class] begin[:]
if call[name[tag_class].startswith, parameter[constant[+]]] begin[:]
call[name[kw]][constant[class]] assign[=] binary_operation[binary_operation[call[name[tag_class]][<ast.Slice object at 0x7da204347d60>] + constant[ ]] + call[name[_class].lstrip, parameter[]]]
if compare[name[tag] equal[==] constant[a]] begin[:]
call[name[print], parameter[constant[------], name[kw]]]
variable[href] assign[=] call[name[kw].get, parameter[constant[href], constant[#]]]
if <ast.BoolOp object at 0x7da2043466e0> begin[:]
variable[_cls] assign[=] constant[outter]
if call[name[kw].get, parameter[constant[class]]] begin[:]
call[name[kw]][constant[class]] assign[=] binary_operation[binary_operation[call[name[kw]][constant[class]] + constant[ ]] + name[_cls]]
variable[attrs] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da204346f20>]]
if name[attrs] begin[:]
variable[attrs] assign[=] binary_operation[constant[ ] + name[attrs]]
variable[nline] assign[=] <ast.IfExp object at 0x7da204347d00>
if name[child] begin[:]
variable[enclose] assign[=] constant[2]
if compare[name[enclose] equal[==] constant[1]] begin[:]
return[binary_operation[constant[<%s%s/>%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c991270>, <ast.Name object at 0x7da20c993100>, <ast.Name object at 0x7da20c992050>]]]]
|
keyword[def] identifier[tag] ( identifier[self] , identifier[tag] , identifier[child] = literal[string] , identifier[enclose] = literal[int] , identifier[newline] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[kw] = identifier[kwargs] . identifier[copy] ()
identifier[_class] = literal[string]
keyword[if] literal[string] keyword[in] identifier[kw] :
identifier[_class] = identifier[kw] . identifier[pop] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[kw] :
identifier[_class] += literal[string] + identifier[kw] . identifier[pop] ( literal[string] )
identifier[tag_class] = identifier[self] . identifier[tag_class] . identifier[get] ( identifier[tag] , literal[string] )
keyword[if] identifier[tag_class] :
keyword[if] identifier[tag_class] . identifier[startswith] ( literal[string] ):
identifier[kw] [ literal[string] ]= identifier[tag_class] [ literal[int] :]+ literal[string] + identifier[_class] . identifier[lstrip] ()
keyword[else] :
identifier[kw] [ literal[string] ]= identifier[tag_class] . identifier[lstrip] ()
keyword[else] :
identifier[kw] [ literal[string] ]= identifier[_class] . identifier[lstrip] ()
keyword[if] identifier[tag] == literal[string] :
identifier[print] ( literal[string] , identifier[kw] )
identifier[href] = identifier[kw] . identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[href] keyword[and] ( identifier[href] . identifier[startswith] ( literal[string] ) keyword[or] identifier[href] . identifier[startswith] ( literal[string] ) keyword[or] identifier[href] . identifier[startswith] ( literal[string] )):
identifier[_cls] = literal[string]
keyword[else] :
identifier[_cls] = literal[string]
identifier[kw] [ literal[string] ]= identifier[href]
keyword[if] identifier[kw] . identifier[get] ( literal[string] ):
identifier[kw] [ literal[string] ]= identifier[kw] [ literal[string] ]+ literal[string] + identifier[_cls]
keyword[else] :
identifier[kw] [ literal[string] ]= identifier[_cls]
identifier[attrs] = literal[string] . identifier[join] ([ literal[string] %( identifier[x] , identifier[y] ) keyword[for] identifier[x] , identifier[y] keyword[in] identifier[sorted] ( identifier[kw] . identifier[items] ()) keyword[if] identifier[y] ])
keyword[if] identifier[attrs] :
identifier[attrs] = literal[string] + identifier[attrs]
identifier[nline] = literal[string] keyword[if] identifier[newline] keyword[else] literal[string]
keyword[if] identifier[child] :
identifier[enclose] = literal[int]
keyword[if] identifier[enclose] == literal[int] :
keyword[return] literal[string] %( identifier[tag] , identifier[attrs] , identifier[nline] )
keyword[elif] identifier[enclose] == literal[int] :
keyword[return] literal[string] %( identifier[tag] , identifier[attrs] , identifier[child] , identifier[tag] , identifier[nline] )
keyword[else] :
keyword[return] literal[string] %( identifier[tag] , identifier[attrs] , identifier[nline] )
|
def tag(self, tag, child='', enclose=0, newline=True, **kwargs):
"""
enclose:
0 => <tag>
1 => <tag/>
2 => <tag></tag>
"""
kw = kwargs.copy()
_class = ''
if '_class' in kw:
_class = kw.pop('_class') # depends on [control=['if'], data=['kw']]
if 'class' in kw:
_class += ' ' + kw.pop('class') # depends on [control=['if'], data=['kw']]
tag_class = self.tag_class.get(tag, '')
if tag_class:
#if tag_class definition starts with '+', and combine it with original value
if tag_class.startswith('+'):
kw['class'] = tag_class[1:] + ' ' + _class.lstrip() # depends on [control=['if'], data=[]]
else:
kw['class'] = tag_class.lstrip() # depends on [control=['if'], data=[]]
else:
kw['class'] = _class.lstrip()
#process inner and outter link
if tag == 'a':
print('------', kw)
href = kw.get('href', '#')
if href and (href.startswith('http:') or href.startswith('https:') or href.startswith('ftp:')):
_cls = 'outter' # depends on [control=['if'], data=[]]
else:
_cls = 'inner'
kw['href'] = href
if kw.get('class'):
kw['class'] = kw['class'] + ' ' + _cls # depends on [control=['if'], data=[]]
else:
kw['class'] = _cls # depends on [control=['if'], data=[]]
attrs = ' '.join(['%s="%s"' % (x, y) for (x, y) in sorted(kw.items()) if y])
if attrs:
attrs = ' ' + attrs # depends on [control=['if'], data=[]]
nline = '\n' if newline else ''
if child:
enclose = 2 # depends on [control=['if'], data=[]]
if enclose == 1:
return '<%s%s/>%s' % (tag, attrs, nline) # depends on [control=['if'], data=[]]
elif enclose == 2:
return '<%s%s>%s</%s>%s' % (tag, attrs, child, tag, nline) # depends on [control=['if'], data=[]]
else:
return '<%s%s>%s' % (tag, attrs, nline)
|
def receiveds_parsing(receiveds):
"""
This function parses the receiveds headers.
Args:
receiveds (list): list of raw receiveds headers
Returns:
a list of parsed receiveds headers with first hop in first position
"""
parsed = []
receiveds = [re.sub(JUNK_PATTERN, " ", i).strip() for i in receiveds]
n = len(receiveds)
log.debug("Nr. of receiveds. {}".format(n))
for idx, received in enumerate(receiveds):
log.debug("Parsing received {}/{}".format(idx + 1, n))
log.debug("Try to parse {!r}".format(received))
try:
# try to parse the current received header...
values_by_clause = parse_received(received)
except MailParserReceivedParsingError:
# if we can't, let's append the raw
parsed.append({'raw': received})
else:
# otherwise append the full values_by_clause dict
parsed.append(values_by_clause)
log.debug("len(receiveds) %s, len(parsed) %s" % (
len(receiveds), len(parsed)))
if len(receiveds) != len(parsed):
# something really bad happened,
# so just return raw receiveds with hop indices
log.error("len(receiveds): %s, len(parsed): %s, receiveds: %s, \
parsed: %s" % (len(receiveds), len(parsed), receiveds, parsed))
return receiveds_not_parsed(receiveds)
else:
# all's good! we have parsed or raw receiveds for each received header
return receiveds_format(parsed)
|
def function[receiveds_parsing, parameter[receiveds]]:
constant[
This function parses the receiveds headers.
Args:
receiveds (list): list of raw receiveds headers
Returns:
a list of parsed receiveds headers with first hop in first position
]
variable[parsed] assign[=] list[[]]
variable[receiveds] assign[=] <ast.ListComp object at 0x7da1b07cda20>
variable[n] assign[=] call[name[len], parameter[name[receiveds]]]
call[name[log].debug, parameter[call[constant[Nr. of receiveds. {}].format, parameter[name[n]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b07ce680>, <ast.Name object at 0x7da1b07ce7a0>]]] in starred[call[name[enumerate], parameter[name[receiveds]]]] begin[:]
call[name[log].debug, parameter[call[constant[Parsing received {}/{}].format, parameter[binary_operation[name[idx] + constant[1]], name[n]]]]]
call[name[log].debug, parameter[call[constant[Try to parse {!r}].format, parameter[name[received]]]]]
<ast.Try object at 0x7da1b07cc730>
call[name[log].debug, parameter[binary_operation[constant[len(receiveds) %s, len(parsed) %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b07cda50>, <ast.Call object at 0x7da1b07ccee0>]]]]]
if compare[call[name[len], parameter[name[receiveds]]] not_equal[!=] call[name[len], parameter[name[parsed]]]] begin[:]
call[name[log].error, parameter[binary_operation[constant[len(receiveds): %s, len(parsed): %s, receiveds: %s, parsed: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b07f7220>, <ast.Call object at 0x7da1b07f4430>, <ast.Name object at 0x7da1b07f46d0>, <ast.Name object at 0x7da1b07f41c0>]]]]]
return[call[name[receiveds_not_parsed], parameter[name[receiveds]]]]
|
keyword[def] identifier[receiveds_parsing] ( identifier[receiveds] ):
literal[string]
identifier[parsed] =[]
identifier[receiveds] =[ identifier[re] . identifier[sub] ( identifier[JUNK_PATTERN] , literal[string] , identifier[i] ). identifier[strip] () keyword[for] identifier[i] keyword[in] identifier[receiveds] ]
identifier[n] = identifier[len] ( identifier[receiveds] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[n] ))
keyword[for] identifier[idx] , identifier[received] keyword[in] identifier[enumerate] ( identifier[receiveds] ):
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[idx] + literal[int] , identifier[n] ))
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[received] ))
keyword[try] :
identifier[values_by_clause] = identifier[parse_received] ( identifier[received] )
keyword[except] identifier[MailParserReceivedParsingError] :
identifier[parsed] . identifier[append] ({ literal[string] : identifier[received] })
keyword[else] :
identifier[parsed] . identifier[append] ( identifier[values_by_clause] )
identifier[log] . identifier[debug] ( literal[string] %(
identifier[len] ( identifier[receiveds] ), identifier[len] ( identifier[parsed] )))
keyword[if] identifier[len] ( identifier[receiveds] )!= identifier[len] ( identifier[parsed] ):
identifier[log] . identifier[error] ( literal[string] %( identifier[len] ( identifier[receiveds] ), identifier[len] ( identifier[parsed] ), identifier[receiveds] , identifier[parsed] ))
keyword[return] identifier[receiveds_not_parsed] ( identifier[receiveds] )
keyword[else] :
keyword[return] identifier[receiveds_format] ( identifier[parsed] )
|
def receiveds_parsing(receiveds):
"""
This function parses the receiveds headers.
Args:
receiveds (list): list of raw receiveds headers
Returns:
a list of parsed receiveds headers with first hop in first position
"""
parsed = []
receiveds = [re.sub(JUNK_PATTERN, ' ', i).strip() for i in receiveds]
n = len(receiveds)
log.debug('Nr. of receiveds. {}'.format(n))
for (idx, received) in enumerate(receiveds):
log.debug('Parsing received {}/{}'.format(idx + 1, n))
log.debug('Try to parse {!r}'.format(received))
try:
# try to parse the current received header...
values_by_clause = parse_received(received) # depends on [control=['try'], data=[]]
except MailParserReceivedParsingError:
# if we can't, let's append the raw
parsed.append({'raw': received}) # depends on [control=['except'], data=[]]
else:
# otherwise append the full values_by_clause dict
parsed.append(values_by_clause) # depends on [control=['for'], data=[]]
log.debug('len(receiveds) %s, len(parsed) %s' % (len(receiveds), len(parsed)))
if len(receiveds) != len(parsed):
# something really bad happened,
# so just return raw receiveds with hop indices
log.error('len(receiveds): %s, len(parsed): %s, receiveds: %s, parsed: %s' % (len(receiveds), len(parsed), receiveds, parsed))
return receiveds_not_parsed(receiveds) # depends on [control=['if'], data=[]]
else:
# all's good! we have parsed or raw receiveds for each received header
return receiveds_format(parsed)
|
def convert_string_value_to_type_value(string_value, data_type):
"""Helper function to convert a given string to a given data type
:param str string_value: the string to convert
:param type data_type: the target data type
:return: the converted value
"""
from ast import literal_eval
try:
if data_type in (str, type(None)):
converted_value = str(string_value)
elif data_type == int:
converted_value = int(string_value)
elif data_type == float:
converted_value = float(string_value)
elif data_type == bool:
converted_value = bool(literal_eval(string_value))
elif data_type in (list, dict, tuple):
converted_value = literal_eval(string_value)
if type(converted_value) != data_type:
raise ValueError("Invalid syntax: {0}".format(string_value))
elif data_type == object:
try:
converted_value = literal_eval(string_value)
except (ValueError, SyntaxError):
converted_value = literal_eval('"' + string_value + '"')
elif isinstance(data_type, type): # Try native type conversion
converted_value = data_type(string_value)
elif isclass(data_type): # Call class constructor
converted_value = data_type(string_value)
else:
raise ValueError("No conversion from string '{0}' to data type '{0}' defined".format(
string_value, data_type.__name__))
except (ValueError, SyntaxError, TypeError) as e:
raise AttributeError("Can't convert '{0}' to type '{1}': {2}".format(string_value, data_type.__name__, e))
return converted_value
|
def function[convert_string_value_to_type_value, parameter[string_value, data_type]]:
constant[Helper function to convert a given string to a given data type
:param str string_value: the string to convert
:param type data_type: the target data type
:return: the converted value
]
from relative_module[ast] import module[literal_eval]
<ast.Try object at 0x7da18ede75e0>
return[name[converted_value]]
|
keyword[def] identifier[convert_string_value_to_type_value] ( identifier[string_value] , identifier[data_type] ):
literal[string]
keyword[from] identifier[ast] keyword[import] identifier[literal_eval]
keyword[try] :
keyword[if] identifier[data_type] keyword[in] ( identifier[str] , identifier[type] ( keyword[None] )):
identifier[converted_value] = identifier[str] ( identifier[string_value] )
keyword[elif] identifier[data_type] == identifier[int] :
identifier[converted_value] = identifier[int] ( identifier[string_value] )
keyword[elif] identifier[data_type] == identifier[float] :
identifier[converted_value] = identifier[float] ( identifier[string_value] )
keyword[elif] identifier[data_type] == identifier[bool] :
identifier[converted_value] = identifier[bool] ( identifier[literal_eval] ( identifier[string_value] ))
keyword[elif] identifier[data_type] keyword[in] ( identifier[list] , identifier[dict] , identifier[tuple] ):
identifier[converted_value] = identifier[literal_eval] ( identifier[string_value] )
keyword[if] identifier[type] ( identifier[converted_value] )!= identifier[data_type] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[string_value] ))
keyword[elif] identifier[data_type] == identifier[object] :
keyword[try] :
identifier[converted_value] = identifier[literal_eval] ( identifier[string_value] )
keyword[except] ( identifier[ValueError] , identifier[SyntaxError] ):
identifier[converted_value] = identifier[literal_eval] ( literal[string] + identifier[string_value] + literal[string] )
keyword[elif] identifier[isinstance] ( identifier[data_type] , identifier[type] ):
identifier[converted_value] = identifier[data_type] ( identifier[string_value] )
keyword[elif] identifier[isclass] ( identifier[data_type] ):
identifier[converted_value] = identifier[data_type] ( identifier[string_value] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[string_value] , identifier[data_type] . identifier[__name__] ))
keyword[except] ( identifier[ValueError] , identifier[SyntaxError] , identifier[TypeError] ) keyword[as] identifier[e] :
keyword[raise] identifier[AttributeError] ( literal[string] . identifier[format] ( identifier[string_value] , identifier[data_type] . identifier[__name__] , identifier[e] ))
keyword[return] identifier[converted_value]
|
def convert_string_value_to_type_value(string_value, data_type):
"""Helper function to convert a given string to a given data type
:param str string_value: the string to convert
:param type data_type: the target data type
:return: the converted value
"""
from ast import literal_eval
try:
if data_type in (str, type(None)):
converted_value = str(string_value) # depends on [control=['if'], data=[]]
elif data_type == int:
converted_value = int(string_value) # depends on [control=['if'], data=['int']]
elif data_type == float:
converted_value = float(string_value) # depends on [control=['if'], data=['float']]
elif data_type == bool:
converted_value = bool(literal_eval(string_value)) # depends on [control=['if'], data=['bool']]
elif data_type in (list, dict, tuple):
converted_value = literal_eval(string_value)
if type(converted_value) != data_type:
raise ValueError('Invalid syntax: {0}'.format(string_value)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['data_type']]
elif data_type == object:
try:
converted_value = literal_eval(string_value) # depends on [control=['try'], data=[]]
except (ValueError, SyntaxError):
converted_value = literal_eval('"' + string_value + '"') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(data_type, type): # Try native type conversion
converted_value = data_type(string_value) # depends on [control=['if'], data=[]]
elif isclass(data_type): # Call class constructor
converted_value = data_type(string_value) # depends on [control=['if'], data=[]]
else:
raise ValueError("No conversion from string '{0}' to data type '{0}' defined".format(string_value, data_type.__name__)) # depends on [control=['try'], data=[]]
except (ValueError, SyntaxError, TypeError) as e:
raise AttributeError("Can't convert '{0}' to type '{1}': {2}".format(string_value, data_type.__name__, e)) # depends on [control=['except'], data=['e']]
return converted_value
|
def load(self, filething):
"""Load tags from a filename.
Raises apev2.error
"""
data = _APEv2Data(filething.fileobj)
if data.tag:
self.clear()
self.__parse_tag(data.tag, data.items)
else:
raise APENoHeaderError("No APE tag found")
|
def function[load, parameter[self, filething]]:
constant[Load tags from a filename.
Raises apev2.error
]
variable[data] assign[=] call[name[_APEv2Data], parameter[name[filething].fileobj]]
if name[data].tag begin[:]
call[name[self].clear, parameter[]]
call[name[self].__parse_tag, parameter[name[data].tag, name[data].items]]
|
keyword[def] identifier[load] ( identifier[self] , identifier[filething] ):
literal[string]
identifier[data] = identifier[_APEv2Data] ( identifier[filething] . identifier[fileobj] )
keyword[if] identifier[data] . identifier[tag] :
identifier[self] . identifier[clear] ()
identifier[self] . identifier[__parse_tag] ( identifier[data] . identifier[tag] , identifier[data] . identifier[items] )
keyword[else] :
keyword[raise] identifier[APENoHeaderError] ( literal[string] )
|
def load(self, filething):
"""Load tags from a filename.
Raises apev2.error
"""
data = _APEv2Data(filething.fileobj)
if data.tag:
self.clear()
self.__parse_tag(data.tag, data.items) # depends on [control=['if'], data=[]]
else:
raise APENoHeaderError('No APE tag found')
|
def add_play(self, choice, count=1):
"""Increments the play count for a given experiment choice"""
self.redis.hincrby(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, "%s:plays" % choice, count)
self._choices = None
|
def function[add_play, parameter[self, choice, count]]:
constant[Increments the play count for a given experiment choice]
call[name[self].redis.hincrby, parameter[binary_operation[name[EXPERIMENT_REDIS_KEY_TEMPLATE] <ast.Mod object at 0x7da2590d6920> name[self].name], binary_operation[constant[%s:plays] <ast.Mod object at 0x7da2590d6920> name[choice]], name[count]]]
name[self]._choices assign[=] constant[None]
|
keyword[def] identifier[add_play] ( identifier[self] , identifier[choice] , identifier[count] = literal[int] ):
literal[string]
identifier[self] . identifier[redis] . identifier[hincrby] ( identifier[EXPERIMENT_REDIS_KEY_TEMPLATE] % identifier[self] . identifier[name] , literal[string] % identifier[choice] , identifier[count] )
identifier[self] . identifier[_choices] = keyword[None]
|
def add_play(self, choice, count=1):
"""Increments the play count for a given experiment choice"""
self.redis.hincrby(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, '%s:plays' % choice, count)
self._choices = None
|
def discover_and_apply(self, directory=None, dry_run=False):
"""
Retrieve the patches and try to apply them against the datamodel
:param directory: Directory to search the patch in (default: patches_dir)
:param dry_run: Don't actually apply the patches
"""
directory = directory or self.patches_dir
patches_dict = {p.base_version: p for p in self.discover(directory)}
current_version = self.manifest.version
if not patches_dict.get(current_version):
print('No patch to apply')
return
if dry_run:
msg = 'Datamodel should be in version %s !'
else:
msg = 'Datamodel in now in version %s !'
pss = []
while True:
patch = patches_dict.get(current_version)
if not patch:
print(msg % current_version)
if pss:
print()
print(yellow('\n'.join(pss)))
return
print('Applying patch %s => %s' % (patch.base_version,
patch.target_version))
patch_pss = [patch.ps] if patch.ps else []
if not dry_run:
patch_pss += self.apply_patch(patch)
if patch_pss:
pss.append("Patch %s:\n%s" % (patch.target_version,
tabulate('\n'.join(patch_pss))))
self.manifest.reload()
current_version = patch.target_version
|
def function[discover_and_apply, parameter[self, directory, dry_run]]:
constant[
Retrieve the patches and try to apply them against the datamodel
:param directory: Directory to search the patch in (default: patches_dir)
:param dry_run: Don't actually apply the patches
]
variable[directory] assign[=] <ast.BoolOp object at 0x7da1b0965b40>
variable[patches_dict] assign[=] <ast.DictComp object at 0x7da1b09664d0>
variable[current_version] assign[=] name[self].manifest.version
if <ast.UnaryOp object at 0x7da1b0964fa0> begin[:]
call[name[print], parameter[constant[No patch to apply]]]
return[None]
if name[dry_run] begin[:]
variable[msg] assign[=] constant[Datamodel should be in version %s !]
variable[pss] assign[=] list[[]]
while constant[True] begin[:]
variable[patch] assign[=] call[name[patches_dict].get, parameter[name[current_version]]]
if <ast.UnaryOp object at 0x7da1b0966080> begin[:]
call[name[print], parameter[binary_operation[name[msg] <ast.Mod object at 0x7da2590d6920> name[current_version]]]]
if name[pss] begin[:]
call[name[print], parameter[]]
call[name[print], parameter[call[name[yellow], parameter[call[constant[
].join, parameter[name[pss]]]]]]]
return[None]
call[name[print], parameter[binary_operation[constant[Applying patch %s => %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b09671c0>, <ast.Attribute object at 0x7da1b0964040>]]]]]
variable[patch_pss] assign[=] <ast.IfExp object at 0x7da1b0967d90>
if <ast.UnaryOp object at 0x7da1b0966ec0> begin[:]
<ast.AugAssign object at 0x7da1b0965600>
if name[patch_pss] begin[:]
call[name[pss].append, parameter[binary_operation[constant[Patch %s:
%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b095f7f0>, <ast.Call object at 0x7da1b095dae0>]]]]]
call[name[self].manifest.reload, parameter[]]
variable[current_version] assign[=] name[patch].target_version
|
keyword[def] identifier[discover_and_apply] ( identifier[self] , identifier[directory] = keyword[None] , identifier[dry_run] = keyword[False] ):
literal[string]
identifier[directory] = identifier[directory] keyword[or] identifier[self] . identifier[patches_dir]
identifier[patches_dict] ={ identifier[p] . identifier[base_version] : identifier[p] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[discover] ( identifier[directory] )}
identifier[current_version] = identifier[self] . identifier[manifest] . identifier[version]
keyword[if] keyword[not] identifier[patches_dict] . identifier[get] ( identifier[current_version] ):
identifier[print] ( literal[string] )
keyword[return]
keyword[if] identifier[dry_run] :
identifier[msg] = literal[string]
keyword[else] :
identifier[msg] = literal[string]
identifier[pss] =[]
keyword[while] keyword[True] :
identifier[patch] = identifier[patches_dict] . identifier[get] ( identifier[current_version] )
keyword[if] keyword[not] identifier[patch] :
identifier[print] ( identifier[msg] % identifier[current_version] )
keyword[if] identifier[pss] :
identifier[print] ()
identifier[print] ( identifier[yellow] ( literal[string] . identifier[join] ( identifier[pss] )))
keyword[return]
identifier[print] ( literal[string] %( identifier[patch] . identifier[base_version] ,
identifier[patch] . identifier[target_version] ))
identifier[patch_pss] =[ identifier[patch] . identifier[ps] ] keyword[if] identifier[patch] . identifier[ps] keyword[else] []
keyword[if] keyword[not] identifier[dry_run] :
identifier[patch_pss] += identifier[self] . identifier[apply_patch] ( identifier[patch] )
keyword[if] identifier[patch_pss] :
identifier[pss] . identifier[append] ( literal[string] %( identifier[patch] . identifier[target_version] ,
identifier[tabulate] ( literal[string] . identifier[join] ( identifier[patch_pss] ))))
identifier[self] . identifier[manifest] . identifier[reload] ()
identifier[current_version] = identifier[patch] . identifier[target_version]
|
def discover_and_apply(self, directory=None, dry_run=False):
"""
Retrieve the patches and try to apply them against the datamodel
:param directory: Directory to search the patch in (default: patches_dir)
:param dry_run: Don't actually apply the patches
"""
directory = directory or self.patches_dir
patches_dict = {p.base_version: p for p in self.discover(directory)}
current_version = self.manifest.version
if not patches_dict.get(current_version):
print('No patch to apply')
return # depends on [control=['if'], data=[]]
if dry_run:
msg = 'Datamodel should be in version %s !' # depends on [control=['if'], data=[]]
else:
msg = 'Datamodel in now in version %s !'
pss = []
while True:
patch = patches_dict.get(current_version)
if not patch:
print(msg % current_version)
if pss:
print()
print(yellow('\n'.join(pss))) # depends on [control=['if'], data=[]]
return # depends on [control=['if'], data=[]]
print('Applying patch %s => %s' % (patch.base_version, patch.target_version))
patch_pss = [patch.ps] if patch.ps else []
if not dry_run:
patch_pss += self.apply_patch(patch) # depends on [control=['if'], data=[]]
if patch_pss:
pss.append('Patch %s:\n%s' % (patch.target_version, tabulate('\n'.join(patch_pss)))) # depends on [control=['if'], data=[]]
self.manifest.reload()
current_version = patch.target_version # depends on [control=['while'], data=[]]
|
def chart_type(self, value):
"""Set the MetricsGraphics chart type.
Allowed charts are: line, histogram, point, and bar
Args:
value (str): chart type.
Raises:
ValueError: Not a valid chart type.
"""
if value not in self._allowed_charts:
raise ValueError("Not a valid chart type")
self.options["chart_type"] = value
|
def function[chart_type, parameter[self, value]]:
constant[Set the MetricsGraphics chart type.
Allowed charts are: line, histogram, point, and bar
Args:
value (str): chart type.
Raises:
ValueError: Not a valid chart type.
]
if compare[name[value] <ast.NotIn object at 0x7da2590d7190> name[self]._allowed_charts] begin[:]
<ast.Raise object at 0x7da1b1847700>
call[name[self].options][constant[chart_type]] assign[=] name[value]
|
keyword[def] identifier[chart_type] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[not] keyword[in] identifier[self] . identifier[_allowed_charts] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[options] [ literal[string] ]= identifier[value]
|
def chart_type(self, value):
"""Set the MetricsGraphics chart type.
Allowed charts are: line, histogram, point, and bar
Args:
value (str): chart type.
Raises:
ValueError: Not a valid chart type.
"""
if value not in self._allowed_charts:
raise ValueError('Not a valid chart type') # depends on [control=['if'], data=[]]
self.options['chart_type'] = value
|
def write(models, base=None, graph=None, rdfsonly=False, prefixes=None, logger=logging):
'''
See the command line help
'''
prefixes = prefixes or {}
g = graph or rdflib.Graph()
#g.bind('bf', BFNS)
#g.bind('bfc', BFCNS)
#g.bind('bfd', BFDNS)
g.bind('v', VNS)
for k, v in prefixes.items():
g.bind(k, v)
for m in models:
base_out = m.base
process(m, g, rdfsonly, base=base_out, logger=logger)
return g
|
def function[write, parameter[models, base, graph, rdfsonly, prefixes, logger]]:
constant[
See the command line help
]
variable[prefixes] assign[=] <ast.BoolOp object at 0x7da204620850>
variable[g] assign[=] <ast.BoolOp object at 0x7da204623bb0>
call[name[g].bind, parameter[constant[v], name[VNS]]]
for taget[tuple[[<ast.Name object at 0x7da204623e20>, <ast.Name object at 0x7da204621bd0>]]] in starred[call[name[prefixes].items, parameter[]]] begin[:]
call[name[g].bind, parameter[name[k], name[v]]]
for taget[name[m]] in starred[name[models]] begin[:]
variable[base_out] assign[=] name[m].base
call[name[process], parameter[name[m], name[g], name[rdfsonly]]]
return[name[g]]
|
keyword[def] identifier[write] ( identifier[models] , identifier[base] = keyword[None] , identifier[graph] = keyword[None] , identifier[rdfsonly] = keyword[False] , identifier[prefixes] = keyword[None] , identifier[logger] = identifier[logging] ):
literal[string]
identifier[prefixes] = identifier[prefixes] keyword[or] {}
identifier[g] = identifier[graph] keyword[or] identifier[rdflib] . identifier[Graph] ()
identifier[g] . identifier[bind] ( literal[string] , identifier[VNS] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[prefixes] . identifier[items] ():
identifier[g] . identifier[bind] ( identifier[k] , identifier[v] )
keyword[for] identifier[m] keyword[in] identifier[models] :
identifier[base_out] = identifier[m] . identifier[base]
identifier[process] ( identifier[m] , identifier[g] , identifier[rdfsonly] , identifier[base] = identifier[base_out] , identifier[logger] = identifier[logger] )
keyword[return] identifier[g]
|
def write(models, base=None, graph=None, rdfsonly=False, prefixes=None, logger=logging):
"""
See the command line help
"""
prefixes = prefixes or {}
g = graph or rdflib.Graph()
#g.bind('bf', BFNS)
#g.bind('bfc', BFCNS)
#g.bind('bfd', BFDNS)
g.bind('v', VNS)
for (k, v) in prefixes.items():
g.bind(k, v) # depends on [control=['for'], data=[]]
for m in models:
base_out = m.base
process(m, g, rdfsonly, base=base_out, logger=logger) # depends on [control=['for'], data=['m']]
return g
|
def row_coordinates(self, X):
    """Returns the row principal coordinates.

    The row principal coordinates are obtained by projecting `X` on the right
    eigenvectors.
    """
    utils.validation.check_is_fitted(self, 's_')

    # Keep the caller's row labels when a DataFrame is supplied; plain arrays
    # get a default RangeIndex.
    row_index = X.index if isinstance(X, pd.DataFrame) else None

    # Optionally work on a copy so the caller's data is never mutated.
    if self.copy:
        X = np.copy(X)

    # Apply the same scaling that was fitted, if any.
    if hasattr(self, 'scaler_'):
        X = self.scaler_.transform(X)

    # Project onto the right singular vectors.
    return pd.DataFrame(data=X.dot(self.V_.T), index=row_index)
|
def function[row_coordinates, parameter[self, X]]:
constant[Returns the row principal coordinates.
The row principal coordinates are obtained by projecting `X` on the right eigenvectors.
]
call[name[utils].validation.check_is_fitted, parameter[name[self], constant[s_]]]
variable[index] assign[=] <ast.IfExp object at 0x7da1b16dfdc0>
if name[self].copy begin[:]
variable[X] assign[=] call[name[np].copy, parameter[name[X]]]
if call[name[hasattr], parameter[name[self], constant[scaler_]]] begin[:]
variable[X] assign[=] call[name[self].scaler_.transform, parameter[name[X]]]
return[call[name[pd].DataFrame, parameter[]]]
|
keyword[def] identifier[row_coordinates] ( identifier[self] , identifier[X] ):
literal[string]
identifier[utils] . identifier[validation] . identifier[check_is_fitted] ( identifier[self] , literal[string] )
identifier[index] = identifier[X] . identifier[index] keyword[if] identifier[isinstance] ( identifier[X] , identifier[pd] . identifier[DataFrame] ) keyword[else] keyword[None]
keyword[if] identifier[self] . identifier[copy] :
identifier[X] = identifier[np] . identifier[copy] ( identifier[X] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[X] = identifier[self] . identifier[scaler_] . identifier[transform] ( identifier[X] )
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[data] = identifier[X] . identifier[dot] ( identifier[self] . identifier[V_] . identifier[T] ), identifier[index] = identifier[index] )
|
def row_coordinates(self, X):
"""Returns the row principal coordinates.
The row principal coordinates are obtained by projecting `X` on the right eigenvectors.
"""
utils.validation.check_is_fitted(self, 's_')
# Extract index
index = X.index if isinstance(X, pd.DataFrame) else None
# Copy data
if self.copy:
X = np.copy(X) # depends on [control=['if'], data=[]]
# Scale data
if hasattr(self, 'scaler_'):
X = self.scaler_.transform(X) # depends on [control=['if'], data=[]]
return pd.DataFrame(data=X.dot(self.V_.T), index=index)
|
def prepare_static_data(self, data):
    """
    If the user defined static fields, render them into their display values.

    Starts from the object's own dict, overlays ``data``, and replaces every
    static field's raw value with its rendered ``display`` form.
    """
    merged = self.obj.to_dict()
    merged.update(data.copy())

    for field in self.get_fields():
        name = field['name']
        # Only static fields that are actually present get rendered.
        if not field['static'] or name not in merged:
            continue
        rendered = make_view_field(field, self.obj, self.types_convert_map,
                                   self.fields_convert_map, merged[name])
        merged[name] = rendered['display']

    return merged
|
def function[prepare_static_data, parameter[self, data]]:
constant[
If user defined static fields, then process them with visiable value
]
variable[d] assign[=] call[name[self].obj.to_dict, parameter[]]
call[name[d].update, parameter[call[name[data].copy, parameter[]]]]
for taget[name[f]] in starred[call[name[self].get_fields, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da20c6ab730> begin[:]
variable[v] assign[=] call[name[make_view_field], parameter[name[f], name[self].obj, name[self].types_convert_map, name[self].fields_convert_map, call[name[d]][call[name[f]][constant[name]]]]]
call[name[d]][call[name[f]][constant[name]]] assign[=] call[name[v]][constant[display]]
return[name[d]]
|
keyword[def] identifier[prepare_static_data] ( identifier[self] , identifier[data] ):
literal[string]
identifier[d] = identifier[self] . identifier[obj] . identifier[to_dict] ()
identifier[d] . identifier[update] ( identifier[data] . identifier[copy] ())
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[get_fields] ():
keyword[if] identifier[f] [ literal[string] ] keyword[and] identifier[f] [ literal[string] ] keyword[in] identifier[d] :
identifier[v] = identifier[make_view_field] ( identifier[f] , identifier[self] . identifier[obj] , identifier[self] . identifier[types_convert_map] , identifier[self] . identifier[fields_convert_map] , identifier[d] [ identifier[f] [ literal[string] ]])
identifier[d] [ identifier[f] [ literal[string] ]]= identifier[v] [ literal[string] ]
keyword[return] identifier[d]
|
def prepare_static_data(self, data):
"""
If user defined static fields, then process them with visiable value
"""
d = self.obj.to_dict()
d.update(data.copy())
for f in self.get_fields():
if f['static'] and f['name'] in d:
v = make_view_field(f, self.obj, self.types_convert_map, self.fields_convert_map, d[f['name']])
d[f['name']] = v['display'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
return d
|
def getEdgePoints(self):
    """
    Return the coordinates of the rectangle's corner points as a list of
    ``(x, y)`` tuples, e.g. ``[(x1, y1), (x2, y2), ...]``.

    The ordering is counterclockwise starting with the lower left corner.
    Coordinates must be numbers or an exception will be thrown (the values
    are passed through ``float()``).
    """
    # Call each getter and convert once; the original re-invoked every
    # getter (and float()) up to four times for the same value.
    x = float(self.get_x())
    y = float(self.get_y())
    width = float(self.get_width())
    height = float(self.get_height())

    # Counterclockwise from the lower-left corner.
    return [
        (x, y),
        (x + width, y),
        (x + width, y + height),
        (x, y + height),
    ]
|
def function[getEdgePoints, parameter[self]]:
constant[
Returns a list with the coordinates of the points at the edge of the rectangle as tuples.
e.g.[(x1,y1),(x2,y2)]
The sorting is counterclockwise starting with the lower left corner.
Coordinates must be numbers or an exception will be thrown.
]
variable[result] assign[=] list[[<ast.Tuple object at 0x7da18f720520>]]
call[name[result].append, parameter[tuple[[<ast.BinOp object at 0x7da18f7214b0>, <ast.Call object at 0x7da18f720be0>]]]]
call[name[result].append, parameter[tuple[[<ast.BinOp object at 0x7da18f721030>, <ast.BinOp object at 0x7da18f723550>]]]]
call[name[result].append, parameter[tuple[[<ast.Call object at 0x7da18f723130>, <ast.BinOp object at 0x7da18f720fd0>]]]]
return[name[result]]
|
keyword[def] identifier[getEdgePoints] ( identifier[self] ):
literal[string]
identifier[result] =[( identifier[float] ( identifier[self] . identifier[get_x] ()), identifier[float] ( identifier[self] . identifier[get_y] ()))]
identifier[result] . identifier[append] (( identifier[float] ( identifier[self] . identifier[get_x] ())+ identifier[float] ( identifier[self] . identifier[get_width] ()), identifier[float] ( identifier[self] . identifier[get_y] ())))
identifier[result] . identifier[append] (( identifier[float] ( identifier[self] . identifier[get_x] ())+ identifier[float] ( identifier[self] . identifier[get_width] ()), identifier[float] ( identifier[self] . identifier[get_y] ())+ identifier[float] ( identifier[self] . identifier[get_height] ())))
identifier[result] . identifier[append] (( identifier[float] ( identifier[self] . identifier[get_x] ()), identifier[float] ( identifier[self] . identifier[get_y] ())+ identifier[float] ( identifier[self] . identifier[get_height] ())))
keyword[return] identifier[result]
|
def getEdgePoints(self):
"""
Returns a list with the coordinates of the points at the edge of the rectangle as tuples.
e.g.[(x1,y1),(x2,y2)]
The sorting is counterclockwise starting with the lower left corner.
Coordinates must be numbers or an exception will be thrown.
"""
result = [(float(self.get_x()), float(self.get_y()))]
result.append((float(self.get_x()) + float(self.get_width()), float(self.get_y())))
result.append((float(self.get_x()) + float(self.get_width()), float(self.get_y()) + float(self.get_height())))
result.append((float(self.get_x()), float(self.get_y()) + float(self.get_height())))
return result
|
def _find_file(self, path, saltenv='base'):
    '''
    Locate the file path
    '''
    # Default result: empty strings signal "not found" to callers.
    found = {'path': '', 'rel': ''}

    if salt.utils.url.is_escaped(path):
        # The path arguments are escaped
        path = salt.utils.url.unescape(path)

    # Search every pillar root configured for this saltenv, first hit wins.
    for root in self.opts['pillar_roots'].get(saltenv, []):
        candidate = os.path.join(root, path)
        if not os.path.isfile(candidate):
            continue
        found['path'] = candidate
        found['rel'] = path
        return found

    return found
|
def function[_find_file, parameter[self, path, saltenv]]:
constant[
Locate the file path
]
variable[fnd] assign[=] dictionary[[<ast.Constant object at 0x7da18f720400>, <ast.Constant object at 0x7da18f723130>], [<ast.Constant object at 0x7da18f723bb0>, <ast.Constant object at 0x7da18f721c30>]]
if call[name[salt].utils.url.is_escaped, parameter[name[path]]] begin[:]
variable[path] assign[=] call[name[salt].utils.url.unescape, parameter[name[path]]]
for taget[name[root]] in starred[call[call[name[self].opts][constant[pillar_roots]].get, parameter[name[saltenv], list[[]]]]] begin[:]
variable[full] assign[=] call[name[os].path.join, parameter[name[root], name[path]]]
if call[name[os].path.isfile, parameter[name[full]]] begin[:]
call[name[fnd]][constant[path]] assign[=] name[full]
call[name[fnd]][constant[rel]] assign[=] name[path]
return[name[fnd]]
return[name[fnd]]
|
keyword[def] identifier[_find_file] ( identifier[self] , identifier[path] , identifier[saltenv] = literal[string] ):
literal[string]
identifier[fnd] ={ literal[string] : literal[string] ,
literal[string] : literal[string] }
keyword[if] identifier[salt] . identifier[utils] . identifier[url] . identifier[is_escaped] ( identifier[path] ):
identifier[path] = identifier[salt] . identifier[utils] . identifier[url] . identifier[unescape] ( identifier[path] )
keyword[for] identifier[root] keyword[in] identifier[self] . identifier[opts] [ literal[string] ]. identifier[get] ( identifier[saltenv] ,[]):
identifier[full] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[path] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[full] ):
identifier[fnd] [ literal[string] ]= identifier[full]
identifier[fnd] [ literal[string] ]= identifier[path]
keyword[return] identifier[fnd]
keyword[return] identifier[fnd]
|
def _find_file(self, path, saltenv='base'):
"""
Locate the file path
"""
fnd = {'path': '', 'rel': ''}
if salt.utils.url.is_escaped(path):
# The path arguments are escaped
path = salt.utils.url.unescape(path) # depends on [control=['if'], data=[]]
for root in self.opts['pillar_roots'].get(saltenv, []):
full = os.path.join(root, path)
if os.path.isfile(full):
fnd['path'] = full
fnd['rel'] = path
return fnd # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['root']]
return fnd
|
def store_zonefile_data( self, fetched_zfhash, zonefile_data, min_block_height, peer_hostport, con, path ):
    """
    Store the fetched zonefile (as a serialized string) to storage and cache it locally.
    Update internal state to mark it present

    Return True on success
    Return False on error
    """
    stored = add_atlas_zonefile_data( zonefile_data, self.zonefile_dir )
    if stored:
        # stored! remember it
        log.debug("%s: got %s from %s" % (self.hostport, fetched_zfhash, peer_hostport))

        # update internal state
        self.set_zonefile_present(fetched_zfhash, min_block_height, con=con, path=path)
    else:
        log.error("%s: Failed to store zonefile %s" % (self.hostport, fetched_zfhash))

    return stored
|
def function[store_zonefile_data, parameter[self, fetched_zfhash, zonefile_data, min_block_height, peer_hostport, con, path]]:
constant[
Store the fetched zonefile (as a serialized string) to storage and cache it locally.
Update internal state to mark it present
Return True on success
Return False on error
]
variable[rc] assign[=] call[name[add_atlas_zonefile_data], parameter[name[zonefile_data], name[self].zonefile_dir]]
if <ast.UnaryOp object at 0x7da18bccbbb0> begin[:]
call[name[log].error, parameter[binary_operation[constant[%s: Failed to store zonefile %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18bccb7f0>, <ast.Name object at 0x7da18bccbd00>]]]]]
return[name[rc]]
|
keyword[def] identifier[store_zonefile_data] ( identifier[self] , identifier[fetched_zfhash] , identifier[zonefile_data] , identifier[min_block_height] , identifier[peer_hostport] , identifier[con] , identifier[path] ):
literal[string]
identifier[rc] = identifier[add_atlas_zonefile_data] ( identifier[zonefile_data] , identifier[self] . identifier[zonefile_dir] )
keyword[if] keyword[not] identifier[rc] :
identifier[log] . identifier[error] ( literal[string] %( identifier[self] . identifier[hostport] , identifier[fetched_zfhash] ))
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] %( identifier[self] . identifier[hostport] , identifier[fetched_zfhash] , identifier[peer_hostport] ))
identifier[self] . identifier[set_zonefile_present] ( identifier[fetched_zfhash] , identifier[min_block_height] , identifier[con] = identifier[con] , identifier[path] = identifier[path] )
keyword[return] identifier[rc]
|
def store_zonefile_data(self, fetched_zfhash, zonefile_data, min_block_height, peer_hostport, con, path):
"""
Store the fetched zonefile (as a serialized string) to storage and cache it locally.
Update internal state to mark it present
Return True on success
Return False on error
"""
rc = add_atlas_zonefile_data(zonefile_data, self.zonefile_dir)
if not rc:
log.error('%s: Failed to store zonefile %s' % (self.hostport, fetched_zfhash)) # depends on [control=['if'], data=[]]
else:
# stored! remember it
log.debug('%s: got %s from %s' % (self.hostport, fetched_zfhash, peer_hostport))
# update internal state
self.set_zonefile_present(fetched_zfhash, min_block_height, con=con, path=path)
return rc
|
def allowed(self, **kwargs):
    """
    Get all available sender settings which could be used in "from" parameter of POST messages method.
    Returns :class:`Source` object.

    :Example:

    allowed = client.sources.allowed()

    :param country: Return sender settings available in specified country only. Optional.
    """
    # Any keyword arguments are forwarded verbatim as query-string params.
    response, raw_instance = self.request("GET", self.uri, params=kwargs)
    return self.load_instance(raw_instance)
|
def function[allowed, parameter[self]]:
constant[
Get all available sender settings which could be used in "from" parameter of POST messages method.
Returns :class:`Source` object.
:Example:
allowed = client.sources.allowed()
:param country: Return sender settings available in specified country only. Optional.
]
<ast.Tuple object at 0x7da1b0c42c20> assign[=] call[name[self].request, parameter[constant[GET], name[self].uri]]
return[call[name[self].load_instance, parameter[name[instance]]]]
|
keyword[def] identifier[allowed] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[resp] , identifier[instance] = identifier[self] . identifier[request] ( literal[string] , identifier[self] . identifier[uri] , identifier[params] = identifier[kwargs] )
keyword[return] identifier[self] . identifier[load_instance] ( identifier[instance] )
|
def allowed(self, **kwargs):
"""
Get all available sender settings which could be used in "from" parameter of POST messages method.
Returns :class:`Source` object.
:Example:
allowed = client.sources.allowed()
:param country: Return sender settings available in specified country only. Optional.
"""
(resp, instance) = self.request('GET', self.uri, params=kwargs)
return self.load_instance(instance)
|
def json(value,
         schema = None,
         allow_empty = False,
         json_serializer = None,
         **kwargs):
    """Validate that ``value`` conforms to the supplied JSON Schema.

    .. note::

      ``schema`` supports JSON Schema Drafts 3 - 7. Unless the JSON Schema indicates the
      meta-schema using a ``$schema`` property, the schema will be assumed to conform to
      Draft 7.

    .. hint::

      If either ``value`` or ``schema`` is a string, this validator will assume it is a
      JSON object and try to convert it into a :class:`dict <python:dict>`.

      You can override the JSON serializer used by passing it to the
      ``json_serializer`` property. By default, will utilize the Python
      :class:`json <json>` encoder/decoder.

    :param value: The value to validate.

    :param schema: An optional JSON Schema against which ``value`` will be validated.

    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is empty. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is empty. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :param json_serializer: The JSON encoder/decoder to use to deserialize a
      string passed in ``value``. If not supplied, will default to the Python
      :class:`json <python:json>` encoder/decoder.
    :type json_serializer: callable

    :returns: ``value`` / :obj:`None <python:None>`
    :rtype: :class:`dict <python:dict>` / :class:`list <python:list>` of
      :class:`dict <python:dict>` / :obj:`None <python:None>`

    :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
    :raises CannotCoerceError: if ``value`` cannot be coerced to a
      :class:`dict <python:dict>`
    :raises NotJSONError: if ``value`` cannot be deserialized from JSON
    :raises NotJSONSchemaError: if ``schema`` is not a valid JSON Schema object
    :raises JSONValidationError: if ``value`` does not validate against the JSON Schema

    """
    # Keep the originals for error messages: both may be rebound below.
    original_value = value
    original_schema = schema
    # Empty-value policy: either short-circuit to None or reject outright.
    if not value and not allow_empty:
        raise errors.EmptyValueError('value (%s) was empty' % value)
    elif not value:
        return None
    # ``json_`` is presumably the stdlib json module imported under an alias
    # elsewhere in this file — TODO confirm.
    if not json_serializer:
        json_serializer = json_
    # A string value is treated as serialized JSON and deserialized in place.
    if isinstance(value, str):
        try:
            value = json_serializer.loads(value)
        except Exception:
            raise errors.CannotCoerceError(
                'value (%s) cannot be deserialized from JSON' % original_value
            )
    if isinstance(schema, str):
        try:
            # NOTE(review): ``dict`` here appears to be a sibling validator
            # defined in this module that shadows the builtin — the builtin
            # dict() would not accept these keyword arguments. Confirm
            # against the module's other definitions.
            schema = dict(schema,
                          allow_empty = allow_empty,
                          json_serializer = json_serializer,
                          **kwargs)
        except Exception:
            raise errors.CannotCoerceError(
                'schema (%s) cannot be coerced to a dict' % original_schema
            )
    # ``dict_`` is used as a type here, so it is presumably an alias of the
    # builtin dict class — TODO confirm.
    if not isinstance(value, (list, dict_)):
        raise errors.NotJSONError('value (%s) is not a JSON object' % original_value)
    if original_schema and not isinstance(schema, dict_):
        raise errors.NotJSONError('schema (%s) is not a JSON object' % original_schema)
    # No schema supplied: deserialization succeeding is the only check.
    if not schema:
        return value
    # Schema violations and malformed schemas are mapped onto this package's
    # own exception types.
    try:
        jsonschema.validate(value, schema)
    except jsonschema.exceptions.ValidationError as error:
        raise errors.JSONValidationError(error.message)
    except jsonschema.exceptions.SchemaError as error:
        raise errors.NotJSONSchemaError(error.message)
    return value
|
def function[json, parameter[value, schema, allow_empty, json_serializer]]:
constant[Validate that ``value`` conforms to the supplied JSON Schema.
.. note::
``schema`` supports JSON Schema Drafts 3 - 7. Unless the JSON Schema indicates the
meta-schema using a ``$schema`` property, the schema will be assumed to conform to
Draft 7.
.. hint::
If either ``value`` or ``schema`` is a string, this validator will assume it is a
JSON object and try to convert it into a :class:`dict <python:dict>`.
You can override the JSON serializer used by passing it to the
``json_serializer`` property. By default, will utilize the Python
:class:`json <json>` encoder/decoder.
:param value: The value to validate.
:param schema: An optional JSON Schema against which ``value`` will be validated.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:param json_serializer: The JSON encoder/decoder to use to deserialize a
string passed in ``value``. If not supplied, will default to the Python
:class:`json <python:json>` encoder/decoder.
:type json_serializer: callable
:returns: ``value`` / :obj:`None <python:None>`
:rtype: :class:`dict <python:dict>` / :class:`list <python:list>` of
:class:`dict <python:dict>` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises CannotCoerceError: if ``value`` cannot be coerced to a
:class:`dict <python:dict>`
:raises NotJSONError: if ``value`` cannot be deserialized from JSON
:raises NotJSONSchemaError: if ``schema`` is not a valid JSON Schema object
:raises JSONValidationError: if ``value`` does not validate against the JSON Schema
]
variable[original_value] assign[=] name[value]
variable[original_schema] assign[=] name[schema]
if <ast.BoolOp object at 0x7da1b065a320> begin[:]
<ast.Raise object at 0x7da1b065a770>
if <ast.UnaryOp object at 0x7da1b065a410> begin[:]
variable[json_serializer] assign[=] name[json_]
if call[name[isinstance], parameter[name[value], name[str]]] begin[:]
<ast.Try object at 0x7da1b065a6b0>
if call[name[isinstance], parameter[name[schema], name[str]]] begin[:]
<ast.Try object at 0x7da1b0659600>
if <ast.UnaryOp object at 0x7da1b06590f0> begin[:]
<ast.Raise object at 0x7da1b0659390>
if <ast.BoolOp object at 0x7da1b0658fd0> begin[:]
<ast.Raise object at 0x7da1b0659180>
if <ast.UnaryOp object at 0x7da1b06593c0> begin[:]
return[name[value]]
<ast.Try object at 0x7da1b0659570>
return[name[value]]
|
keyword[def] identifier[json] ( identifier[value] ,
identifier[schema] = keyword[None] ,
identifier[allow_empty] = keyword[False] ,
identifier[json_serializer] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[original_value] = identifier[value]
identifier[original_schema] = identifier[schema]
keyword[if] keyword[not] identifier[value] keyword[and] keyword[not] identifier[allow_empty] :
keyword[raise] identifier[errors] . identifier[EmptyValueError] ( literal[string] % identifier[value] )
keyword[elif] keyword[not] identifier[value] :
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[json_serializer] :
identifier[json_serializer] = identifier[json_]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[str] ):
keyword[try] :
identifier[value] = identifier[json_serializer] . identifier[loads] ( identifier[value] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[errors] . identifier[CannotCoerceError] (
literal[string] % identifier[original_value]
)
keyword[if] identifier[isinstance] ( identifier[schema] , identifier[str] ):
keyword[try] :
identifier[schema] = identifier[dict] ( identifier[schema] ,
identifier[allow_empty] = identifier[allow_empty] ,
identifier[json_serializer] = identifier[json_serializer] ,
** identifier[kwargs] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[errors] . identifier[CannotCoerceError] (
literal[string] % identifier[original_schema]
)
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] ,( identifier[list] , identifier[dict_] )):
keyword[raise] identifier[errors] . identifier[NotJSONError] ( literal[string] % identifier[original_value] )
keyword[if] identifier[original_schema] keyword[and] keyword[not] identifier[isinstance] ( identifier[schema] , identifier[dict_] ):
keyword[raise] identifier[errors] . identifier[NotJSONError] ( literal[string] % identifier[original_schema] )
keyword[if] keyword[not] identifier[schema] :
keyword[return] identifier[value]
keyword[try] :
identifier[jsonschema] . identifier[validate] ( identifier[value] , identifier[schema] )
keyword[except] identifier[jsonschema] . identifier[exceptions] . identifier[ValidationError] keyword[as] identifier[error] :
keyword[raise] identifier[errors] . identifier[JSONValidationError] ( identifier[error] . identifier[message] )
keyword[except] identifier[jsonschema] . identifier[exceptions] . identifier[SchemaError] keyword[as] identifier[error] :
keyword[raise] identifier[errors] . identifier[NotJSONSchemaError] ( identifier[error] . identifier[message] )
keyword[return] identifier[value]
|
def json(value, schema=None, allow_empty=False, json_serializer=None, **kwargs):
"""Validate that ``value`` conforms to the supplied JSON Schema.
.. note::
``schema`` supports JSON Schema Drafts 3 - 7. Unless the JSON Schema indicates the
meta-schema using a ``$schema`` property, the schema will be assumed to conform to
Draft 7.
.. hint::
If either ``value`` or ``schema`` is a string, this validator will assume it is a
JSON object and try to convert it into a :class:`dict <python:dict>`.
You can override the JSON serializer used by passing it to the
``json_serializer`` property. By default, will utilize the Python
:class:`json <json>` encoder/decoder.
:param value: The value to validate.
:param schema: An optional JSON Schema against which ``value`` will be validated.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:param json_serializer: The JSON encoder/decoder to use to deserialize a
string passed in ``value``. If not supplied, will default to the Python
:class:`json <python:json>` encoder/decoder.
:type json_serializer: callable
:returns: ``value`` / :obj:`None <python:None>`
:rtype: :class:`dict <python:dict>` / :class:`list <python:list>` of
:class:`dict <python:dict>` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises CannotCoerceError: if ``value`` cannot be coerced to a
:class:`dict <python:dict>`
:raises NotJSONError: if ``value`` cannot be deserialized from JSON
:raises NotJSONSchemaError: if ``schema`` is not a valid JSON Schema object
:raises JSONValidationError: if ``value`` does not validate against the JSON Schema
"""
original_value = value
original_schema = schema
if not value and (not allow_empty):
raise errors.EmptyValueError('value (%s) was empty' % value) # depends on [control=['if'], data=[]]
elif not value:
return None # depends on [control=['if'], data=[]]
if not json_serializer:
json_serializer = json_ # depends on [control=['if'], data=[]]
if isinstance(value, str):
try:
value = json_serializer.loads(value) # depends on [control=['try'], data=[]]
except Exception:
raise errors.CannotCoerceError('value (%s) cannot be deserialized from JSON' % original_value) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if isinstance(schema, str):
try:
schema = dict(schema, allow_empty=allow_empty, json_serializer=json_serializer, **kwargs) # depends on [control=['try'], data=[]]
except Exception:
raise errors.CannotCoerceError('schema (%s) cannot be coerced to a dict' % original_schema) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if not isinstance(value, (list, dict_)):
raise errors.NotJSONError('value (%s) is not a JSON object' % original_value) # depends on [control=['if'], data=[]]
if original_schema and (not isinstance(schema, dict_)):
raise errors.NotJSONError('schema (%s) is not a JSON object' % original_schema) # depends on [control=['if'], data=[]]
if not schema:
return value # depends on [control=['if'], data=[]]
try:
jsonschema.validate(value, schema) # depends on [control=['try'], data=[]]
except jsonschema.exceptions.ValidationError as error:
raise errors.JSONValidationError(error.message) # depends on [control=['except'], data=['error']]
except jsonschema.exceptions.SchemaError as error:
raise errors.NotJSONSchemaError(error.message) # depends on [control=['except'], data=['error']]
return value
|
def open_link(self, url):
    """
    Open a media link using the definitions from the user's mailcap file.

    Most urls are parsed using their file extension, but special cases
    exist for websites that are prevalent on reddit such as Imgur and
    Gfycat. If there are no valid mailcap definitions, RTV will fall back
    to using the default webbrowser.

    RTV checks for certain mailcap fields to determine how to open a link:
        - If ``copiousoutput`` is specified, the curses application will
          be paused and stdout will be piped to the system pager.
        - If `needsterminal`` is specified, the curses application will
          yield terminal control to the subprocess until it has exited.
        - Otherwise, we assume that the subprocess is meant to open a new
          x-window, and we swallow all stdout output.

    Examples:
        Stream youtube videos with VLC
        Browse images and imgur albums with feh
        Watch .webm videos through your terminal with mplayer
        View images directly in your terminal with fbi or w3m
        Play .mp3 files with sox player
        Send HTML pages your pager using to html2text
        ...anything is possible!
    """
    # Media handling disabled in config: always defer to the web browser.
    if not self.config['enable_media']:
        self.open_browser(url)
        return

    try:
        with self.loader('Checking link', catch_exception=False):
            # ``command`` is the shell command to run; ``entry`` carries the
            # raw mailcap fields checked below.
            command, entry = self.get_mailcap_entry(url)
    except exceptions.MailcapEntryNotFound:
        # No mailcap rule matched: fall back to the default browser.
        self.open_browser(url)
        return

    _logger.info('Executing command: %s', command)
    needs_terminal = 'needsterminal' in entry
    copious_output = 'copiousoutput' in entry

    if needs_terminal or copious_output:
        # Blocking, pause rtv until the process returns
        with self.suspend():
            os.system('clear')
            # shell=True because ``command`` is a full mailcap shell line,
            # not an argv list.
            p = subprocess.Popen(
                [command], stderr=subprocess.PIPE,
                universal_newlines=True, shell=True)
            _, stderr = p.communicate()
            if copious_output:
                # Let the user read the piped output before curses resumes.
                six.moves.input('Press any key to continue')
        code = p.poll()
        if code != 0:
            _logger.warning(stderr)
            self.show_notification(
                'Program exited with status={0}\n{1}'.format(
                    code, stderr.strip()), style='Error')
    else:
        # Non-blocking, open a background process
        with self.loader('Opening page', delay=0):
            p = subprocess.Popen(
                [command], shell=True, universal_newlines=True,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # Wait a little while to make sure that the command doesn't
            # exit with an error. This isn't perfect, but it should be good
            # enough to catch invalid commands.
            time.sleep(1.0)
            code = p.poll()
            if code is not None and code != 0:
                _, stderr = p.communicate()
                raise exceptions.BrowserError(
                    'Program exited with status={0}\n{1}'.format(
                        code, stderr.strip()))

            # Spin off a thread with p.communicate() to avoid subprocess
            # hang when the stodout/stderr PIPE gets filled up. This
            # behavior was discovered when opening long gifs with mpv
            # because mpv sends a progress bar to stderr.
            # https://thraxil.org/users/anders/posts/2008/03/13/
            threading.Thread(target=p.communicate).start()
|
def function[open_link, parameter[self, url]]:
constant[
Open a media link using the definitions from the user's mailcap file.
Most urls are parsed using their file extension, but special cases
exist for websites that are prevalent on reddit such as Imgur and
Gfycat. If there are no valid mailcap definitions, RTV will fall back
to using the default webbrowser.
RTV checks for certain mailcap fields to determine how to open a link:
- If ``copiousoutput`` is specified, the curses application will
be paused and stdout will be piped to the system pager.
- If `needsterminal`` is specified, the curses application will
yield terminal control to the subprocess until it has exited.
- Otherwise, we assume that the subprocess is meant to open a new
x-window, and we swallow all stdout output.
Examples:
Stream youtube videos with VLC
Browse images and imgur albums with feh
Watch .webm videos through your terminal with mplayer
View images directly in your terminal with fbi or w3m
Play .mp3 files with sox player
Send HTML pages your pager using to html2text
...anything is possible!
]
if <ast.UnaryOp object at 0x7da2044c0b50> begin[:]
call[name[self].open_browser, parameter[name[url]]]
return[None]
<ast.Try object at 0x7da2044c14e0>
call[name[_logger].info, parameter[constant[Executing command: %s], name[command]]]
variable[needs_terminal] assign[=] compare[constant[needsterminal] in name[entry]]
variable[copious_output] assign[=] compare[constant[copiousoutput] in name[entry]]
if <ast.BoolOp object at 0x7da2044c23e0> begin[:]
with call[name[self].suspend, parameter[]] begin[:]
call[name[os].system, parameter[constant[clear]]]
variable[p] assign[=] call[name[subprocess].Popen, parameter[list[[<ast.Name object at 0x7da2044c0640>]]]]
<ast.Tuple object at 0x7da2044c1360> assign[=] call[name[p].communicate, parameter[]]
if name[copious_output] begin[:]
call[name[six].moves.input, parameter[constant[Press any key to continue]]]
variable[code] assign[=] call[name[p].poll, parameter[]]
if compare[name[code] not_equal[!=] constant[0]] begin[:]
call[name[_logger].warning, parameter[name[stderr]]]
call[name[self].show_notification, parameter[call[constant[Program exited with status={0}
{1}].format, parameter[name[code], call[name[stderr].strip, parameter[]]]]]]
|
keyword[def] identifier[open_link] ( identifier[self] , identifier[url] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[config] [ literal[string] ]:
identifier[self] . identifier[open_browser] ( identifier[url] )
keyword[return]
keyword[try] :
keyword[with] identifier[self] . identifier[loader] ( literal[string] , identifier[catch_exception] = keyword[False] ):
identifier[command] , identifier[entry] = identifier[self] . identifier[get_mailcap_entry] ( identifier[url] )
keyword[except] identifier[exceptions] . identifier[MailcapEntryNotFound] :
identifier[self] . identifier[open_browser] ( identifier[url] )
keyword[return]
identifier[_logger] . identifier[info] ( literal[string] , identifier[command] )
identifier[needs_terminal] = literal[string] keyword[in] identifier[entry]
identifier[copious_output] = literal[string] keyword[in] identifier[entry]
keyword[if] identifier[needs_terminal] keyword[or] identifier[copious_output] :
keyword[with] identifier[self] . identifier[suspend] ():
identifier[os] . identifier[system] ( literal[string] )
identifier[p] = identifier[subprocess] . identifier[Popen] (
[ identifier[command] ], identifier[stderr] = identifier[subprocess] . identifier[PIPE] ,
identifier[universal_newlines] = keyword[True] , identifier[shell] = keyword[True] )
identifier[_] , identifier[stderr] = identifier[p] . identifier[communicate] ()
keyword[if] identifier[copious_output] :
identifier[six] . identifier[moves] . identifier[input] ( literal[string] )
identifier[code] = identifier[p] . identifier[poll] ()
keyword[if] identifier[code] != literal[int] :
identifier[_logger] . identifier[warning] ( identifier[stderr] )
identifier[self] . identifier[show_notification] (
literal[string] . identifier[format] (
identifier[code] , identifier[stderr] . identifier[strip] ()), identifier[style] = literal[string] )
keyword[else] :
keyword[with] identifier[self] . identifier[loader] ( literal[string] , identifier[delay] = literal[int] ):
identifier[p] = identifier[subprocess] . identifier[Popen] (
[ identifier[command] ], identifier[shell] = keyword[True] , identifier[universal_newlines] = keyword[True] ,
identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] )
identifier[time] . identifier[sleep] ( literal[int] )
identifier[code] = identifier[p] . identifier[poll] ()
keyword[if] identifier[code] keyword[is] keyword[not] keyword[None] keyword[and] identifier[code] != literal[int] :
identifier[_] , identifier[stderr] = identifier[p] . identifier[communicate] ()
keyword[raise] identifier[exceptions] . identifier[BrowserError] (
literal[string] . identifier[format] (
identifier[code] , identifier[stderr] . identifier[strip] ()))
identifier[threading] . identifier[Thread] ( identifier[target] = identifier[p] . identifier[communicate] ). identifier[start] ()
|
def open_link(self, url):
"""
Open a media link using the definitions from the user's mailcap file.
Most urls are parsed using their file extension, but special cases
exist for websites that are prevalent on reddit such as Imgur and
Gfycat. If there are no valid mailcap definitions, RTV will fall back
to using the default webbrowser.
RTV checks for certain mailcap fields to determine how to open a link:
- If ``copiousoutput`` is specified, the curses application will
be paused and stdout will be piped to the system pager.
- If `needsterminal`` is specified, the curses application will
yield terminal control to the subprocess until it has exited.
- Otherwise, we assume that the subprocess is meant to open a new
x-window, and we swallow all stdout output.
Examples:
Stream youtube videos with VLC
Browse images and imgur albums with feh
Watch .webm videos through your terminal with mplayer
View images directly in your terminal with fbi or w3m
Play .mp3 files with sox player
Send HTML pages your pager using to html2text
...anything is possible!
"""
if not self.config['enable_media']:
self.open_browser(url)
return # depends on [control=['if'], data=[]]
try:
with self.loader('Checking link', catch_exception=False):
(command, entry) = self.get_mailcap_entry(url) # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except exceptions.MailcapEntryNotFound:
self.open_browser(url)
return # depends on [control=['except'], data=[]]
_logger.info('Executing command: %s', command)
needs_terminal = 'needsterminal' in entry
copious_output = 'copiousoutput' in entry
if needs_terminal or copious_output:
# Blocking, pause rtv until the process returns
with self.suspend():
os.system('clear')
p = subprocess.Popen([command], stderr=subprocess.PIPE, universal_newlines=True, shell=True)
(_, stderr) = p.communicate()
if copious_output:
six.moves.input('Press any key to continue') # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
code = p.poll()
if code != 0:
_logger.warning(stderr)
self.show_notification('Program exited with status={0}\n{1}'.format(code, stderr.strip()), style='Error') # depends on [control=['if'], data=['code']] # depends on [control=['if'], data=[]]
else:
# Non-blocking, open a background process
with self.loader('Opening page', delay=0):
p = subprocess.Popen([command], shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Wait a little while to make sure that the command doesn't
# exit with an error. This isn't perfect, but it should be good
# enough to catch invalid commands.
time.sleep(1.0)
code = p.poll()
if code is not None and code != 0:
(_, stderr) = p.communicate()
raise exceptions.BrowserError('Program exited with status={0}\n{1}'.format(code, stderr.strip())) # depends on [control=['if'], data=[]]
# Spin off a thread with p.communicate() to avoid subprocess
# hang when the stodout/stderr PIPE gets filled up. This
# behavior was discovered when opening long gifs with mpv
# because mpv sends a progress bar to stderr.
# https://thraxil.org/users/anders/posts/2008/03/13/
threading.Thread(target=p.communicate).start() # depends on [control=['with'], data=[]]
|
def execute(self, *args, **kwargs):
    """
    Executes the action and returns the result.

    You dont have to call this function directly as the class is callable
    (implements __call__); you just call the @action marked function as normal.

        @action
        def my_action(p1)
            print(p1)
        my_action("x")

    Recognized keyword arguments (removed before the action sees them):
        timeout   -- seconds to wait for the action; -1 (default) = no limit.
        run_async -- if True, return the running worker thread immediately
                     instead of the action's result.
    """
    # Pull the control kwargs out so they are not forwarded to the action.
    timeout = kwargs.pop("timeout", -1)
    execute_async = kwargs.pop("run_async", False)
    result = None
    # Non-blocking acquire: at most one execution of this action at a time.
    if self._action_lock.acquire(False):
        try:
            self.spine.trigger_event("actionStarted", self.action_id)
            if timeout == -1 and not execute_async:
                # Fast path: run synchronously in the caller's thread.
                result = self._execute(*args, **kwargs)
            else:
                # Run in a worker thread so the call can be timed out or
                # left running in the background.
                thread = _ActionThread(self, args, kwargs)
                thread.start()
                if not execute_async:
                    thread.join(timeout)
                    if thread.is_alive():
                        # join() timed out; the worker is still running.
                        result = None
                        #self._send_message("failed", "Timedout in call to action")
                        raise TimeoutError("Timeout in call to action: " + self.action_id)
                    else:
                        result = thread.result
                else:
                    # run_async=True: hand the caller the live thread object.
                    result = thread
            return result
        except Exception as ex:
            # NOTE(review): this swallows every failure (including the
            # TimeoutError raised just above), prints it, and falls through
            # to an implicit `return None` -- confirm callers expect None
            # rather than a propagated exception on failure.
            print(ex)
            self._action_lock.release()
        finally:
            pass
            # self._action_lock.release()
            # NOTE(review): on the success path the lock is NOT released in
            # this method (the release above only runs on exception) --
            # presumably _ActionThread/_execute releases it elsewhere;
            # verify, otherwise the action stays locked forever.
    else:
        # Another caller holds the lock: wait up to `timeout` seconds for
        # it to finish, then return the cached result of that earlier run.
        if not self._action_lock.acquire(True, timeout):
            return None
        self._action_lock.release()
        return self._last_result
|
def function[execute, parameter[self]]:
constant[
Executes the action and returns the result.
You dont have to call this function directly as the class is callable (implements __call__)
you just call the @action marked function as normal.
@action
def my_action(p1)
print(p1)
my_action("x")
]
variable[timeout] assign[=] call[name[kwargs].pop, parameter[constant[timeout], <ast.UnaryOp object at 0x7da20e74be20>]]
variable[execute_async] assign[=] call[name[kwargs].pop, parameter[constant[run_async], constant[False]]]
variable[result] assign[=] constant[None]
if call[name[self]._action_lock.acquire, parameter[constant[False]]] begin[:]
<ast.Try object at 0x7da20e748a90>
|
keyword[def] identifier[execute] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[timeout] = identifier[kwargs] . identifier[pop] ( literal[string] ,- literal[int] )
identifier[execute_async] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[result] = keyword[None]
keyword[if] identifier[self] . identifier[_action_lock] . identifier[acquire] ( keyword[False] ):
keyword[try] :
identifier[self] . identifier[spine] . identifier[trigger_event] ( literal[string] , identifier[self] . identifier[action_id] )
keyword[if] identifier[timeout] ==- literal[int] keyword[and] keyword[not] identifier[execute_async] :
identifier[result] = identifier[self] . identifier[_execute] (* identifier[args] ,** identifier[kwargs] )
keyword[else] :
identifier[thread] = identifier[_ActionThread] ( identifier[self] , identifier[args] , identifier[kwargs] )
identifier[thread] . identifier[start] ()
keyword[if] keyword[not] identifier[execute_async] :
identifier[thread] . identifier[join] ( identifier[timeout] )
keyword[if] identifier[thread] . identifier[is_alive] ():
identifier[result] = keyword[None]
keyword[raise] identifier[TimeoutError] ( literal[string] + identifier[self] . identifier[action_id] )
keyword[else] :
identifier[result] = identifier[thread] . identifier[result]
keyword[else] :
identifier[result] = identifier[thread]
keyword[return] identifier[result]
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[print] ( identifier[ex] )
identifier[self] . identifier[_action_lock] . identifier[release] ()
keyword[finally] :
keyword[pass]
keyword[else] :
keyword[if] keyword[not] identifier[self] . identifier[_action_lock] . identifier[acquire] ( keyword[True] , identifier[timeout] ):
keyword[return] keyword[None]
identifier[self] . identifier[_action_lock] . identifier[release] ()
keyword[return] identifier[self] . identifier[_last_result]
|
def execute(self, *args, **kwargs):
"""
Executes the action and returns the result.
You dont have to call this function directly as the class is callable (implements __call__)
you just call the @action marked function as normal.
@action
def my_action(p1)
print(p1)
my_action("x")
"""
timeout = kwargs.pop('timeout', -1)
execute_async = kwargs.pop('run_async', False)
result = None
if self._action_lock.acquire(False):
try:
self.spine.trigger_event('actionStarted', self.action_id)
if timeout == -1 and (not execute_async):
result = self._execute(*args, **kwargs) # depends on [control=['if'], data=[]]
else:
thread = _ActionThread(self, args, kwargs)
thread.start()
if not execute_async:
thread.join(timeout)
if thread.is_alive():
result = None #self._send_message("failed", "Timedout in call to action")
raise TimeoutError('Timeout in call to action: ' + self.action_id) # depends on [control=['if'], data=[]]
else:
result = thread.result # depends on [control=['if'], data=[]]
else:
result = thread
return result # depends on [control=['try'], data=[]]
except Exception as ex:
print(ex)
self._action_lock.release() # depends on [control=['except'], data=['ex']]
finally:
pass # depends on [control=['if'], data=[]]
else:
# self._action_lock.release()
if not self._action_lock.acquire(True, timeout):
return None # depends on [control=['if'], data=[]]
self._action_lock.release()
return self._last_result
|
def property_observer(self, name):
    """Decorator form of ``MPV.observe_property``.

    Registers the decorated function as an observer of property *name* and
    attaches an ``unobserve_mpv_properties`` hook to it for deregistration.
    See ``MPV.observe_property`` for details.
    """
    def register(handler):
        # Hook the handler up, then hand back the original callable so the
        # decorated name still refers to it.
        self.observe_property(name, handler)
        handler.unobserve_mpv_properties = lambda: self.unobserve_property(name, handler)
        return handler
    return register
|
def function[property_observer, parameter[self, name]]:
constant[Function decorator to register a property observer. See ``MPV.observe_property`` for details.]
def function[wrapper, parameter[fun]]:
call[name[self].observe_property, parameter[name[name], name[fun]]]
name[fun].unobserve_mpv_properties assign[=] <ast.Lambda object at 0x7da204345cc0>
return[name[fun]]
return[name[wrapper]]
|
keyword[def] identifier[property_observer] ( identifier[self] , identifier[name] ):
literal[string]
keyword[def] identifier[wrapper] ( identifier[fun] ):
identifier[self] . identifier[observe_property] ( identifier[name] , identifier[fun] )
identifier[fun] . identifier[unobserve_mpv_properties] = keyword[lambda] : identifier[self] . identifier[unobserve_property] ( identifier[name] , identifier[fun] )
keyword[return] identifier[fun]
keyword[return] identifier[wrapper]
|
def property_observer(self, name):
"""Function decorator to register a property observer. See ``MPV.observe_property`` for details."""
def wrapper(fun):
self.observe_property(name, fun)
fun.unobserve_mpv_properties = lambda : self.unobserve_property(name, fun)
return fun
return wrapper
|
def validate_unit(input_unit):
    """Validate unit.

    To be compatible with existing SYNPHOT data files:

    * 'angstroms' and 'inversemicrons' are accepted although
      unrecognized by astropy units
    * 'transmission', 'extinction', and 'emissivity' are
      converted to astropy dimensionless unit

    Parameters
    ----------
    input_unit : str or `~astropy.units.core.Unit`
        Unit to validate.

    Returns
    -------
    output_unit : `~astropy.units.core.Unit`
        Validated unit.

    Raises
    ------
    synphot.exceptions.SynphotError
        Invalid unit.

    """
    # Already a unit object: pass it straight through.
    if isinstance(input_unit, (u.UnitBase, u.LogUnit)):
        return input_unit

    if not isinstance(input_unit, str):
        raise exceptions.SynphotError(
            '{0} must be a recognized string or '
            'astropy.units.core.Unit'.format(input_unit))

    lowcase = input_unit.lower()

    # Legacy SYNPHOT spellings and mag-unit workarounds, keyed on the
    # lower-cased name.
    aliases = {
        'angstroms': u.AA,                     # backward-compatibility
        'inversemicrons': u.micron ** -1,      # backward-compatibility
        'transmission': THROUGHPUT,
        'extinction': THROUGHPUT,
        'emissivity': THROUGHPUT,
        'jy': u.Jy,
        'stmag': u.STmag,                      # work around mag unit limits
        'mag(st)': u.STmag,
        'abmag': u.ABmag,
        'mag(ab)': u.ABmag,
    }
    if lowcase in aliases:
        return aliases[lowcase]

    try:
        # astropy.units is case-sensitive
        return u.Unit(input_unit)
    except ValueError:
        # synphot is case-insensitive
        return u.Unit(lowcase)
|
def function[validate_unit, parameter[input_unit]]:
constant[Validate unit.
To be compatible with existing SYNPHOT data files:
* 'angstroms' and 'inversemicrons' are accepted although
unrecognized by astropy units
* 'transmission', 'extinction', and 'emissivity' are
converted to astropy dimensionless unit
Parameters
----------
input_unit : str or `~astropy.units.core.Unit`
Unit to validate.
Returns
-------
output_unit : `~astropy.units.core.Unit`
Validated unit.
Raises
------
synphot.exceptions.SynphotError
Invalid unit.
]
if call[name[isinstance], parameter[name[input_unit], name[str]]] begin[:]
variable[input_unit_lowcase] assign[=] call[name[input_unit].lower, parameter[]]
if compare[name[input_unit_lowcase] equal[==] constant[angstroms]] begin[:]
variable[output_unit] assign[=] name[u].AA
return[name[output_unit]]
|
keyword[def] identifier[validate_unit] ( identifier[input_unit] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[input_unit] , identifier[str] ):
identifier[input_unit_lowcase] = identifier[input_unit] . identifier[lower] ()
keyword[if] identifier[input_unit_lowcase] == literal[string] :
identifier[output_unit] = identifier[u] . identifier[AA]
keyword[elif] identifier[input_unit_lowcase] == literal[string] :
identifier[output_unit] = identifier[u] . identifier[micron] **- literal[int]
keyword[elif] identifier[input_unit_lowcase] keyword[in] ( literal[string] , literal[string] ,
literal[string] ):
identifier[output_unit] = identifier[THROUGHPUT]
keyword[elif] identifier[input_unit_lowcase] == literal[string] :
identifier[output_unit] = identifier[u] . identifier[Jy]
keyword[elif] identifier[input_unit_lowcase] keyword[in] ( literal[string] , literal[string] ):
identifier[output_unit] = identifier[u] . identifier[STmag]
keyword[elif] identifier[input_unit_lowcase] keyword[in] ( literal[string] , literal[string] ):
identifier[output_unit] = identifier[u] . identifier[ABmag]
keyword[else] :
keyword[try] :
identifier[output_unit] = identifier[u] . identifier[Unit] ( identifier[input_unit] )
keyword[except] identifier[ValueError] :
identifier[output_unit] = identifier[u] . identifier[Unit] ( identifier[input_unit_lowcase] )
keyword[elif] identifier[isinstance] ( identifier[input_unit] ,( identifier[u] . identifier[UnitBase] , identifier[u] . identifier[LogUnit] )):
identifier[output_unit] = identifier[input_unit]
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[SynphotError] (
literal[string]
literal[string] . identifier[format] ( identifier[input_unit] ))
keyword[return] identifier[output_unit]
|
def validate_unit(input_unit):
"""Validate unit.
To be compatible with existing SYNPHOT data files:
* 'angstroms' and 'inversemicrons' are accepted although
unrecognized by astropy units
* 'transmission', 'extinction', and 'emissivity' are
converted to astropy dimensionless unit
Parameters
----------
input_unit : str or `~astropy.units.core.Unit`
Unit to validate.
Returns
-------
output_unit : `~astropy.units.core.Unit`
Validated unit.
Raises
------
synphot.exceptions.SynphotError
Invalid unit.
"""
if isinstance(input_unit, str):
input_unit_lowcase = input_unit.lower()
# Backward-compatibility
if input_unit_lowcase == 'angstroms':
output_unit = u.AA # depends on [control=['if'], data=[]]
elif input_unit_lowcase == 'inversemicrons':
output_unit = u.micron ** (-1) # depends on [control=['if'], data=[]]
elif input_unit_lowcase in ('transmission', 'extinction', 'emissivity'):
output_unit = THROUGHPUT # depends on [control=['if'], data=[]]
elif input_unit_lowcase == 'jy':
output_unit = u.Jy # depends on [control=['if'], data=[]]
# Work around mag unit limitations
elif input_unit_lowcase in ('stmag', 'mag(st)'):
output_unit = u.STmag # depends on [control=['if'], data=[]]
elif input_unit_lowcase in ('abmag', 'mag(ab)'):
output_unit = u.ABmag # depends on [control=['if'], data=[]]
else:
try: # astropy.units is case-sensitive
output_unit = u.Unit(input_unit) # depends on [control=['try'], data=[]]
except ValueError: # synphot is case-insensitive
output_unit = u.Unit(input_unit_lowcase) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(input_unit, (u.UnitBase, u.LogUnit)):
output_unit = input_unit # depends on [control=['if'], data=[]]
else:
raise exceptions.SynphotError('{0} must be a recognized string or astropy.units.core.Unit'.format(input_unit))
return output_unit
|
def _loadfilepath(self, filepath, **kwargs):
"""This loads a geojson file into a geojson python
dictionary using the json module.
Note: to load with a different text encoding use the encoding argument.
"""
with open(filepath, "r") as f:
data = json.load(f, **kwargs)
return data
|
def function[_loadfilepath, parameter[self, filepath]]:
constant[This loads a geojson file into a geojson python
dictionary using the json module.
Note: to load with a different text encoding use the encoding argument.
]
with call[name[open], parameter[name[filepath], constant[r]]] begin[:]
variable[data] assign[=] call[name[json].load, parameter[name[f]]]
return[name[data]]
|
keyword[def] identifier[_loadfilepath] ( identifier[self] , identifier[filepath] ,** identifier[kwargs] ):
literal[string]
keyword[with] identifier[open] ( identifier[filepath] , literal[string] ) keyword[as] identifier[f] :
identifier[data] = identifier[json] . identifier[load] ( identifier[f] ,** identifier[kwargs] )
keyword[return] identifier[data]
|
def _loadfilepath(self, filepath, **kwargs):
"""This loads a geojson file into a geojson python
dictionary using the json module.
Note: to load with a different text encoding use the encoding argument.
"""
with open(filepath, 'r') as f:
data = json.load(f, **kwargs) # depends on [control=['with'], data=['f']]
return data
|
def updateSolutionTerminal(self):
    '''
    Update the terminal period solution. This method should be run when a
    new AgentType is created or when CRRA changes.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    # Hoist the shared inputs so each value function is built the same way.
    cFunc = self.cFunc_terminal_
    crra = self.CRRA
    terminal = self.solution_terminal
    terminal.vFunc = ValueFunc2D(cFunc, crra)
    terminal.vPfunc = MargValueFunc2D(cFunc, crra)
    terminal.vPPfunc = MargMargValueFunc2D(cFunc, crra)
    # Don't track normalized human wealth...
    terminal.hNrm = 0.0
    # ...but do track absolute human wealth by persistent income.
    terminal.hLvl = lambda p: np.zeros_like(p)
    terminal.mLvlMin = lambda p: np.zeros_like(p)
|
def function[updateSolutionTerminal, parameter[self]]:
constant[
Update the terminal period solution. This method should be run when a
new AgentType is created or when CRRA changes.
Parameters
----------
None
Returns
-------
None
]
name[self].solution_terminal.vFunc assign[=] call[name[ValueFunc2D], parameter[name[self].cFunc_terminal_, name[self].CRRA]]
name[self].solution_terminal.vPfunc assign[=] call[name[MargValueFunc2D], parameter[name[self].cFunc_terminal_, name[self].CRRA]]
name[self].solution_terminal.vPPfunc assign[=] call[name[MargMargValueFunc2D], parameter[name[self].cFunc_terminal_, name[self].CRRA]]
name[self].solution_terminal.hNrm assign[=] constant[0.0]
name[self].solution_terminal.hLvl assign[=] <ast.Lambda object at 0x7da1b084f310>
name[self].solution_terminal.mLvlMin assign[=] <ast.Lambda object at 0x7da1b084d090>
|
keyword[def] identifier[updateSolutionTerminal] ( identifier[self] ):
literal[string]
identifier[self] . identifier[solution_terminal] . identifier[vFunc] = identifier[ValueFunc2D] ( identifier[self] . identifier[cFunc_terminal_] , identifier[self] . identifier[CRRA] )
identifier[self] . identifier[solution_terminal] . identifier[vPfunc] = identifier[MargValueFunc2D] ( identifier[self] . identifier[cFunc_terminal_] , identifier[self] . identifier[CRRA] )
identifier[self] . identifier[solution_terminal] . identifier[vPPfunc] = identifier[MargMargValueFunc2D] ( identifier[self] . identifier[cFunc_terminal_] , identifier[self] . identifier[CRRA] )
identifier[self] . identifier[solution_terminal] . identifier[hNrm] = literal[int]
identifier[self] . identifier[solution_terminal] . identifier[hLvl] = keyword[lambda] identifier[p] : identifier[np] . identifier[zeros_like] ( identifier[p] )
identifier[self] . identifier[solution_terminal] . identifier[mLvlMin] = keyword[lambda] identifier[p] : identifier[np] . identifier[zeros_like] ( identifier[p] )
|
def updateSolutionTerminal(self):
"""
Update the terminal period solution. This method should be run when a
new AgentType is created or when CRRA changes.
Parameters
----------
None
Returns
-------
None
"""
self.solution_terminal.vFunc = ValueFunc2D(self.cFunc_terminal_, self.CRRA)
self.solution_terminal.vPfunc = MargValueFunc2D(self.cFunc_terminal_, self.CRRA)
self.solution_terminal.vPPfunc = MargMargValueFunc2D(self.cFunc_terminal_, self.CRRA)
self.solution_terminal.hNrm = 0.0 # Don't track normalized human wealth
self.solution_terminal.hLvl = lambda p: np.zeros_like(p) # But do track absolute human wealth by persistent income
self.solution_terminal.mLvlMin = lambda p: np.zeros_like(p)
|
def read_csv(self, dtype=False, parse_dates=True, *args, **kwargs):
    """Fetch the target and pass through to pandas.read_csv.

    Don't provide the first argument of read_csv(); it is supplied
    internally from the resolved resource target.
    """
    import pandas
    target = self.resolved_url.get_resource().get_target()
    pandas_kwargs = self._update_pandas_kwargs(dtype, parse_dates, kwargs)
    return pandas.read_csv(target.fspath, *args, **pandas_kwargs)
|
def function[read_csv, parameter[self, dtype, parse_dates]]:
constant[Fetch the target and pass through to pandas.read_csv
Don't provide the first argument of read_csv(); it is supplied internally.
]
import module[pandas]
variable[t] assign[=] call[call[name[self].resolved_url.get_resource, parameter[]].get_target, parameter[]]
variable[kwargs] assign[=] call[name[self]._update_pandas_kwargs, parameter[name[dtype], name[parse_dates], name[kwargs]]]
return[call[name[pandas].read_csv, parameter[name[t].fspath, <ast.Starred object at 0x7da1b195f910>]]]
|
keyword[def] identifier[read_csv] ( identifier[self] , identifier[dtype] = keyword[False] , identifier[parse_dates] = keyword[True] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[import] identifier[pandas]
identifier[t] = identifier[self] . identifier[resolved_url] . identifier[get_resource] (). identifier[get_target] ()
identifier[kwargs] = identifier[self] . identifier[_update_pandas_kwargs] ( identifier[dtype] , identifier[parse_dates] , identifier[kwargs] )
keyword[return] identifier[pandas] . identifier[read_csv] ( identifier[t] . identifier[fspath] ,* identifier[args] ,** identifier[kwargs] )
|
def read_csv(self, dtype=False, parse_dates=True, *args, **kwargs):
"""Fetch the target and pass through to pandas.read_csv
Don't provide the first argument of read_csv(); it is supplied internally.
"""
import pandas
t = self.resolved_url.get_resource().get_target()
kwargs = self._update_pandas_kwargs(dtype, parse_dates, kwargs)
return pandas.read_csv(t.fspath, *args, **kwargs)
|
def _raise_for_status(response):
    """ make sure that only crate.exceptions are raised that are defined in
    the DB-API specification """
    status = response.status
    if 400 <= status < 500:
        message = '%s Client Error: %s' % (status, response.reason)
    elif 500 <= status < 600:
        message = '%s Server Error: %s' % (status, response.reason)
    else:
        # Not an HTTP error status: nothing to raise.
        return
    if status == 503:
        raise ConnectionError(message)
    if response.headers.get("content-type", "").startswith("application/json"):
        payload = json.loads(response.data.decode('utf-8'))
        error = payload.get('error', {})
        error_trace = payload.get('error_trace', None)
        if "results" in payload:
            failures = [item["error_message"] for item in payload["results"]
                        if item.get("error_message")]
            if failures:
                raise ProgrammingError("\n".join(failures))
        if isinstance(error, dict):
            raise ProgrammingError(error.get('message', ''),
                                   error_trace=error_trace)
        raise ProgrammingError(error, error_trace=error_trace)
    raise ProgrammingError(message)
|
def function[_raise_for_status, parameter[response]]:
constant[ make sure that only crate.exceptions are raised that are defined in
the DB-API specification ]
variable[message] assign[=] constant[]
if compare[constant[400] less_or_equal[<=] name[response].status] begin[:]
variable[message] assign[=] binary_operation[constant[%s Client Error: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1082020>, <ast.Attribute object at 0x7da1b10826b0>]]]
if compare[name[response].status equal[==] constant[503]] begin[:]
<ast.Raise object at 0x7da1b10834f0>
if call[call[name[response].headers.get, parameter[constant[content-type], constant[]]].startswith, parameter[constant[application/json]]] begin[:]
variable[data] assign[=] call[name[json].loads, parameter[call[name[response].data.decode, parameter[constant[utf-8]]]]]
variable[error] assign[=] call[name[data].get, parameter[constant[error], dictionary[[], []]]]
variable[error_trace] assign[=] call[name[data].get, parameter[constant[error_trace], constant[None]]]
if compare[constant[results] in name[data]] begin[:]
variable[errors] assign[=] <ast.ListComp object at 0x7da1b1082f50>
if name[errors] begin[:]
<ast.Raise object at 0x7da1b101b8b0>
if call[name[isinstance], parameter[name[error], name[dict]]] begin[:]
<ast.Raise object at 0x7da1b101b880>
<ast.Raise object at 0x7da1b101b040>
<ast.Raise object at 0x7da1b101a590>
|
keyword[def] identifier[_raise_for_status] ( identifier[response] ):
literal[string]
identifier[message] = literal[string]
keyword[if] literal[int] <= identifier[response] . identifier[status] < literal[int] :
identifier[message] = literal[string] %( identifier[response] . identifier[status] , identifier[response] . identifier[reason] )
keyword[elif] literal[int] <= identifier[response] . identifier[status] < literal[int] :
identifier[message] = literal[string] %( identifier[response] . identifier[status] , identifier[response] . identifier[reason] )
keyword[else] :
keyword[return]
keyword[if] identifier[response] . identifier[status] == literal[int] :
keyword[raise] identifier[ConnectionError] ( identifier[message] )
keyword[if] identifier[response] . identifier[headers] . identifier[get] ( literal[string] , literal[string] ). identifier[startswith] ( literal[string] ):
identifier[data] = identifier[json] . identifier[loads] ( identifier[response] . identifier[data] . identifier[decode] ( literal[string] ))
identifier[error] = identifier[data] . identifier[get] ( literal[string] ,{})
identifier[error_trace] = identifier[data] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[errors] =[ identifier[res] [ literal[string] ] keyword[for] identifier[res] keyword[in] identifier[data] [ literal[string] ]
keyword[if] identifier[res] . identifier[get] ( literal[string] )]
keyword[if] identifier[errors] :
keyword[raise] identifier[ProgrammingError] ( literal[string] . identifier[join] ( identifier[errors] ))
keyword[if] identifier[isinstance] ( identifier[error] , identifier[dict] ):
keyword[raise] identifier[ProgrammingError] ( identifier[error] . identifier[get] ( literal[string] , literal[string] ),
identifier[error_trace] = identifier[error_trace] )
keyword[raise] identifier[ProgrammingError] ( identifier[error] , identifier[error_trace] = identifier[error_trace] )
keyword[raise] identifier[ProgrammingError] ( identifier[message] )
|
def _raise_for_status(response):
    """ make sure that only crate.exceptions are raised that are defined in
    the DB-API specification """
    status = response.status
    if 400 <= status < 500:
        message = '%s Client Error: %s' % (status, response.reason)
    elif 500 <= status < 600:
        message = '%s Server Error: %s' % (status, response.reason)
    else:
        # 1xx/2xx/3xx responses are fine -- nothing to raise
        return
    if status == 503:
        # service unavailable maps to a connection-level problem
        raise ConnectionError(message)
    content_type = response.headers.get('content-type', '')
    if content_type.startswith('application/json'):
        payload = json.loads(response.data.decode('utf-8'))
        error = payload.get('error', {})
        error_trace = payload.get('error_trace', None)
        if 'results' in payload:
            # bulk responses: collect per-row error messages, if any
            failures = [row['error_message'] for row in payload['results']
                        if row.get('error_message')]
            if failures:
                raise ProgrammingError('\n'.join(failures))
        if isinstance(error, dict):
            raise ProgrammingError(error.get('message', ''),
                                   error_trace=error_trace)
        raise ProgrammingError(error, error_trace=error_trace)
    # non-JSON error body: fall back to the generic HTTP message
    raise ProgrammingError(message)
|
def set_sort_order(self, sort_order):
    """
    Use the SortOrder object to sort the listings descending or ascending.
    :param sort_order:
    :return:
    """
    if isinstance(sort_order, SortOrder):
        self._sort_order = str(sort_order)
    else:
        raise DaftException("sort_order should be an instance of SortOrder.")
|
def function[set_sort_order, parameter[self, sort_order]]:
constant[
Use the SortOrder object to sort the listings descending or ascending.
:param sort_order:
:return:
]
if <ast.UnaryOp object at 0x7da2044c18a0> begin[:]
<ast.Raise object at 0x7da1b06251e0>
name[self]._sort_order assign[=] call[name[str], parameter[name[sort_order]]]
|
keyword[def] identifier[set_sort_order] ( identifier[self] , identifier[sort_order] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[sort_order] , identifier[SortOrder] ):
keyword[raise] identifier[DaftException] ( literal[string] )
identifier[self] . identifier[_sort_order] = identifier[str] ( identifier[sort_order] )
|
def set_sort_order(self, sort_order):
"""
Use the SortOrder object to sort the listings descending or ascending.
:param sort_order:
:return:
"""
if not isinstance(sort_order, SortOrder):
raise DaftException('sort_order should be an instance of SortOrder.') # depends on [control=['if'], data=[]]
self._sort_order = str(sort_order)
|
async def list(self) -> List[str]:
    """
    Return list of pool names configured, empty list for none.
    :return: list of pool names.
    """
    LOGGER.debug('NodePoolManager.list >>>')
    pools = await pool.list_pools()
    rv = [entry['pool'] for entry in pools]
    LOGGER.debug('NodePoolManager.list <<< %s', rv)
    return rv
|
<ast.AsyncFunctionDef object at 0x7da20c6c4d00>
|
keyword[async] keyword[def] identifier[list] ( identifier[self] )-> identifier[List] [ identifier[str] ]:
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[rv] =[ identifier[p] [ literal[string] ] keyword[for] identifier[p] keyword[in] keyword[await] identifier[pool] . identifier[list_pools] ()]
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[rv] )
keyword[return] identifier[rv]
|
async def list(self) -> List[str]:
"""
Return list of pool names configured, empty list for none.
:return: list of pool names.
"""
LOGGER.debug('NodePoolManager.list >>>')
rv = [p['pool'] for p in await pool.list_pools()]
LOGGER.debug('NodePoolManager.list <<< %s', rv)
return rv
|
def get_model(self):
    """
    Get a model if the formula was previously satisfied.

    :returns: the model as a list of literals, an empty list when the
        low-level solver produced no model, or ``None`` (implicitly)
        when no solver is attached or the last call was not satisfiable.
    """
    if self.minisat and self.status == True:
        model = pysolvers.minisat22_model(self.minisat)
        # the C binding may return None for an empty model; use identity
        # comparison with None instead of `!=`
        return model if model is not None else []
|
def function[get_model, parameter[self]]:
constant[
Get a model if the formula was previously satisfied.
]
if <ast.BoolOp object at 0x7da1b128add0> begin[:]
variable[model] assign[=] call[name[pysolvers].minisat22_model, parameter[name[self].minisat]]
return[<ast.IfExp object at 0x7da1b12884c0>]
|
keyword[def] identifier[get_model] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[minisat] keyword[and] identifier[self] . identifier[status] == keyword[True] :
identifier[model] = identifier[pysolvers] . identifier[minisat22_model] ( identifier[self] . identifier[minisat] )
keyword[return] identifier[model] keyword[if] identifier[model] != keyword[None] keyword[else] []
|
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.minisat and self.status == True:
model = pysolvers.minisat22_model(self.minisat)
return model if model != None else [] # depends on [control=['if'], data=[]]
|
def center_of_mass(self, scalars_weight=False):
    """
    Returns the coordinates for the center of mass of the mesh.
    Parameters
    ----------
    scalars_weight : bool, optional
        Flag for using the mesh scalars as weights. Defaults to False.
    Return
    ------
    center : np.ndarray, float
        Coordinates for the center of mass.
    """
    # delegate the computation to VTK's center-of-mass filter
    com_filter = vtk.vtkCenterOfMass()
    com_filter.SetInputData(self)
    com_filter.SetUseScalarsAsWeights(scalars_weight)
    com_filter.Update()
    center = com_filter.GetCenter()
    return np.array(center)
|
def function[center_of_mass, parameter[self, scalars_weight]]:
constant[
Returns the coordinates for the center of mass of the mesh.
Parameters
----------
scalars_weight : bool, optional
Flag for using the mesh scalars as weights. Defaults to False.
Return
------
center : np.ndarray, float
Coordinates for the center of mass.
]
variable[comfilter] assign[=] call[name[vtk].vtkCenterOfMass, parameter[]]
call[name[comfilter].SetInputData, parameter[name[self]]]
call[name[comfilter].SetUseScalarsAsWeights, parameter[name[scalars_weight]]]
call[name[comfilter].Update, parameter[]]
return[call[name[np].array, parameter[call[name[comfilter].GetCenter, parameter[]]]]]
|
keyword[def] identifier[center_of_mass] ( identifier[self] , identifier[scalars_weight] = keyword[False] ):
literal[string]
identifier[comfilter] = identifier[vtk] . identifier[vtkCenterOfMass] ()
identifier[comfilter] . identifier[SetInputData] ( identifier[self] )
identifier[comfilter] . identifier[SetUseScalarsAsWeights] ( identifier[scalars_weight] )
identifier[comfilter] . identifier[Update] ()
keyword[return] identifier[np] . identifier[array] ( identifier[comfilter] . identifier[GetCenter] ())
|
def center_of_mass(self, scalars_weight=False):
"""
Returns the coordinates for the center of mass of the mesh.
Parameters
----------
scalars_weight : bool, optional
Flag for using the mesh scalars as weights. Defaults to False.
Return
------
center : np.ndarray, float
Coordinates for the center of mass.
"""
comfilter = vtk.vtkCenterOfMass()
comfilter.SetInputData(self)
comfilter.SetUseScalarsAsWeights(scalars_weight)
comfilter.Update()
return np.array(comfilter.GetCenter())
|
def sqrt(self):
    """square root operation
    Returns
    -------
    Matrix : Matrix
        square root of self
    """
    cls = type(self)
    # row/col names and the autoalign flag are carried over unchanged
    names = dict(row_names=self.row_names, col_names=self.col_names,
                 autoalign=self.autoalign)
    if self.isdiagonal:
        # element-wise sqrt is exact for a diagonal matrix
        return cls(x=np.sqrt(self.__x), isdiagonal=True, **names)
    if self.shape[1] == 1:
        # a column vector: element-wise sqrt
        return cls(x=np.sqrt(self.__x), isdiagonal=False, **names)
    # general case: full matrix square root
    return cls(x=la.sqrtm(self.__x), **names)
|
def function[sqrt, parameter[self]]:
constant[square root operation
Returns
-------
Matrix : Matrix
square root of self
]
if name[self].isdiagonal begin[:]
return[call[call[name[type], parameter[name[self]]], parameter[]]]
|
keyword[def] identifier[sqrt] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[isdiagonal] :
keyword[return] identifier[type] ( identifier[self] )( identifier[x] = identifier[np] . identifier[sqrt] ( identifier[self] . identifier[__x] ), identifier[isdiagonal] = keyword[True] ,
identifier[row_names] = identifier[self] . identifier[row_names] ,
identifier[col_names] = identifier[self] . identifier[col_names] ,
identifier[autoalign] = identifier[self] . identifier[autoalign] )
keyword[elif] identifier[self] . identifier[shape] [ literal[int] ]== literal[int] :
keyword[return] identifier[type] ( identifier[self] )( identifier[x] = identifier[np] . identifier[sqrt] ( identifier[self] . identifier[__x] ), identifier[isdiagonal] = keyword[False] ,
identifier[row_names] = identifier[self] . identifier[row_names] ,
identifier[col_names] = identifier[self] . identifier[col_names] ,
identifier[autoalign] = identifier[self] . identifier[autoalign] )
keyword[else] :
keyword[return] identifier[type] ( identifier[self] )( identifier[x] = identifier[la] . identifier[sqrtm] ( identifier[self] . identifier[__x] ), identifier[row_names] = identifier[self] . identifier[row_names] ,
identifier[col_names] = identifier[self] . identifier[col_names] ,
identifier[autoalign] = identifier[self] . identifier[autoalign] )
|
def sqrt(self):
"""square root operation
Returns
-------
Matrix : Matrix
square root of self
"""
if self.isdiagonal:
return type(self)(x=np.sqrt(self.__x), isdiagonal=True, row_names=self.row_names, col_names=self.col_names, autoalign=self.autoalign) # depends on [control=['if'], data=[]]
elif self.shape[1] == 1: #a vector
return type(self)(x=np.sqrt(self.__x), isdiagonal=False, row_names=self.row_names, col_names=self.col_names, autoalign=self.autoalign) # depends on [control=['if'], data=[]]
else:
return type(self)(x=la.sqrtm(self.__x), row_names=self.row_names, col_names=self.col_names, autoalign=self.autoalign)
|
def get(self):
    """
    Handle GET: run a metrics query against the topology's tmaster and
    write the result (or the error) as the HTTP response.
    """
    try:
        cluster = self.get_argument_cluster()
        role = self.get_argument_role()
        environ = self.get_argument_environ()
        topo_name = self.get_argument_topology()
        topology = self.tracker.getTopologyByClusterRoleEnvironAndName(
            cluster, role, environ, topo_name)
        start_time = self.get_argument_starttime()
        end_time = self.get_argument_endtime()
        self.validateInterval(start_time, end_time)
        query = self.get_argument_query()
        # executeMetricsQuery is callback-style; tornado.gen.Task makes it
        # yieldable inside this coroutine
        metrics = yield tornado.gen.Task(
            self.executeMetricsQuery, topology.tmaster, query,
            int(start_time), int(end_time))
        self.write_success_response(metrics)
    except Exception as exc:
        Log.debug(traceback.format_exc())
        self.write_error_response(exc)
|
def function[get, parameter[self]]:
constant[ get method ]
<ast.Try object at 0x7da18ede7160>
|
keyword[def] identifier[get] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[cluster] = identifier[self] . identifier[get_argument_cluster] ()
identifier[role] = identifier[self] . identifier[get_argument_role] ()
identifier[environ] = identifier[self] . identifier[get_argument_environ] ()
identifier[topology_name] = identifier[self] . identifier[get_argument_topology] ()
identifier[topology] = identifier[self] . identifier[tracker] . identifier[getTopologyByClusterRoleEnvironAndName] (
identifier[cluster] , identifier[role] , identifier[environ] , identifier[topology_name] )
identifier[start_time] = identifier[self] . identifier[get_argument_starttime] ()
identifier[end_time] = identifier[self] . identifier[get_argument_endtime] ()
identifier[self] . identifier[validateInterval] ( identifier[start_time] , identifier[end_time] )
identifier[query] = identifier[self] . identifier[get_argument_query] ()
identifier[metrics] = keyword[yield] identifier[tornado] . identifier[gen] . identifier[Task] ( identifier[self] . identifier[executeMetricsQuery] ,
identifier[topology] . identifier[tmaster] , identifier[query] , identifier[int] ( identifier[start_time] ), identifier[int] ( identifier[end_time] ))
identifier[self] . identifier[write_success_response] ( identifier[metrics] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[Log] . identifier[debug] ( identifier[traceback] . identifier[format_exc] ())
identifier[self] . identifier[write_error_response] ( identifier[e] )
|
def get(self):
""" get method """
try:
cluster = self.get_argument_cluster()
role = self.get_argument_role()
environ = self.get_argument_environ()
topology_name = self.get_argument_topology()
topology = self.tracker.getTopologyByClusterRoleEnvironAndName(cluster, role, environ, topology_name)
start_time = self.get_argument_starttime()
end_time = self.get_argument_endtime()
self.validateInterval(start_time, end_time)
query = self.get_argument_query()
metrics = (yield tornado.gen.Task(self.executeMetricsQuery, topology.tmaster, query, int(start_time), int(end_time)))
self.write_success_response(metrics) # depends on [control=['try'], data=[]]
except Exception as e:
Log.debug(traceback.format_exc())
self.write_error_response(e) # depends on [control=['except'], data=['e']]
|
def async_save_result(self):
    """
    Retrieves the result of this subject's asynchronous save.
    - Returns `True` if the subject was saved successfully.
    - Raises `concurrent.futures.CancelledError` if the save was cancelled.
    - If the save failed, raises the relevant exception.
    - Returns `False` if the subject hasn't finished saving or if the
      subject has not been queued for asynchronous save.
    """
    if not hasattr(self, "_async_future"):
        # never queued for an asynchronous save
        return False
    if not self._async_future.done():
        # still saving
        return False
    # result() re-raises any exception captured by a failed save
    self._async_future.result()
    return True
|
def function[async_save_result, parameter[self]]:
constant[
Retrieves the result of this subject's asynchronous save.
- Returns `True` if the subject was saved successfully.
- Raises `concurrent.futures.CancelledError` if the save was cancelled.
- If the save failed, raises the relevant exception.
- Returns `False` if the subject hasn't finished saving or if the
subject has not been queued for asynchronous save.
]
if <ast.BoolOp object at 0x7da1b06c9000> begin[:]
call[name[self]._async_future.result, parameter[]]
return[constant[True]]
|
keyword[def] identifier[async_save_result] ( identifier[self] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[_async_future] . identifier[done] ():
identifier[self] . identifier[_async_future] . identifier[result] ()
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False]
|
def async_save_result(self):
"""
Retrieves the result of this subject's asynchronous save.
- Returns `True` if the subject was saved successfully.
- Raises `concurrent.futures.CancelledError` if the save was cancelled.
- If the save failed, raises the relevant exception.
- Returns `False` if the subject hasn't finished saving or if the
subject has not been queued for asynchronous save.
"""
if hasattr(self, '_async_future') and self._async_future.done():
self._async_future.result()
return True # depends on [control=['if'], data=[]]
else:
return False
|
def delete(self, file_id):
    """Given an file_id, delete this stored file's files collection document
    and associated chunks from a GridFS bucket.
    For example::
      my_db = MongoClient().test
      fs = GridFSBucket(my_db)
      # Get _id of file to delete
      file_id = fs.upload_from_stream("test_file", "data I want to store!")
      fs.delete(file_id)
    Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
    :Parameters:
      - `file_id`: The _id of the file to be deleted.
    """
    result = self._files.delete_one({"_id": file_id})
    # orphaned chunks are removed regardless of whether the files
    # document existed
    self._chunks.delete_many({"files_id": file_id})
    if result.deleted_count:
        return
    raise NoFile(
        "no file could be deleted because none matched %s" % file_id)
|
def function[delete, parameter[self, file_id]]:
constant[Given an file_id, delete this stored file's files collection document
and associated chunks from a GridFS bucket.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
# Get _id of file to delete
file_id = fs.upload_from_stream("test_file", "data I want to store!")
fs.delete(file_id)
Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
:Parameters:
- `file_id`: The _id of the file to be deleted.
]
variable[res] assign[=] call[name[self]._files.delete_one, parameter[dictionary[[<ast.Constant object at 0x7da1b26acf70>], [<ast.Name object at 0x7da1b26af4f0>]]]]
call[name[self]._chunks.delete_many, parameter[dictionary[[<ast.Constant object at 0x7da1b26ad240>], [<ast.Name object at 0x7da1b26adc60>]]]]
if <ast.UnaryOp object at 0x7da1b26ad210> begin[:]
<ast.Raise object at 0x7da1b26ac910>
|
keyword[def] identifier[delete] ( identifier[self] , identifier[file_id] ):
literal[string]
identifier[res] = identifier[self] . identifier[_files] . identifier[delete_one] ({ literal[string] : identifier[file_id] })
identifier[self] . identifier[_chunks] . identifier[delete_many] ({ literal[string] : identifier[file_id] })
keyword[if] keyword[not] identifier[res] . identifier[deleted_count] :
keyword[raise] identifier[NoFile] (
literal[string] % identifier[file_id] )
|
def delete(self, file_id):
"""Given an file_id, delete this stored file's files collection document
and associated chunks from a GridFS bucket.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
# Get _id of file to delete
file_id = fs.upload_from_stream("test_file", "data I want to store!")
fs.delete(file_id)
Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
:Parameters:
- `file_id`: The _id of the file to be deleted.
"""
res = self._files.delete_one({'_id': file_id})
self._chunks.delete_many({'files_id': file_id})
if not res.deleted_count:
raise NoFile('no file could be deleted because none matched %s' % file_id) # depends on [control=['if'], data=[]]
|
def try_open (*args, **kwargs):
    """Simply a wrapper for io.open(), unless an IOError with
    errno=errno.ENOENT ("no such file or directory") is raised, in which
    case None is returned.

    All arguments are forwarded unchanged to :func:`io.open`; any other
    error propagates to the caller.
    """
    import errno
    try:
        return io.open (*args, **kwargs)
    except IOError as e:
        # use the symbolic constant instead of the magic number 2
        if e.errno == errno.ENOENT:
            return None
        raise
|
def function[try_open, parameter[]]:
constant[Simply a wrapper for io.open(), unless an IOError with errno=2 (ENOENT) is
raised, in which case None is retured.
]
<ast.Try object at 0x7da1b26b7370>
|
keyword[def] identifier[try_open] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[return] identifier[io] . identifier[open] (* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[IOError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] == literal[int] :
keyword[return] keyword[None]
keyword[raise]
|
def try_open(*args, **kwargs):
"""Simply a wrapper for io.open(), unless an IOError with errno=2 (ENOENT) is
raised, in which case None is retured.
"""
try:
return io.open(*args, **kwargs) # depends on [control=['try'], data=[]]
except IOError as e:
if e.errno == 2:
return None # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=['e']]
|
def make_interactive_tree(matrix=None,labels=None):
    '''make interactive tree will return complete html for an interactive tree
    :param matrix: the data as a pandas DataFrame; anything else logs a
                   warning and returns None
    :param labels: optional leaf labels; defaults to the DataFrame index
    '''
    # NOTE: the previous version also imported cophenet/pdist and built a
    # dendrogram whose result was never used; that dead work is removed.
    from scipy.cluster.hierarchy import linkage, to_tree

    d3 = None
    if isinstance(matrix, pandas.DataFrame):
        Z = linkage(matrix, 'ward')  # hierarchical clustering (Ward linkage)
        T = to_tree(Z, rd=False)
        if labels is None:
            labels = matrix.index.tolist()
        # map leaf index -> human-readable label
        lookup = dict(zip(range(len(labels)), labels))
        d3 = dict(children=[], name="root")
        add_node(T, d3)
        label_tree(d3["children"][0], lookup)
    else:
        bot.warning('Please provide data as pandas Data Frame.')
    return d3
|
def function[make_interactive_tree, parameter[matrix, labels]]:
constant[make interactive tree will return complete html for an interactive tree
:param title: a title for the plot, if not defined, will be left out.
]
from relative_module[scipy.cluster.hierarchy] import module[dendrogram], module[linkage], module[to_tree]
variable[d3] assign[=] constant[None]
from relative_module[scipy.cluster.hierarchy] import module[cophenet]
from relative_module[scipy.spatial.distance] import module[pdist]
if call[name[isinstance], parameter[name[matrix], name[pandas].DataFrame]] begin[:]
variable[Z] assign[=] call[name[linkage], parameter[name[matrix], constant[ward]]]
variable[T] assign[=] call[name[to_tree], parameter[name[Z]]]
if compare[name[labels] equal[==] constant[None]] begin[:]
variable[labels] assign[=] call[name[matrix].index.tolist, parameter[]]
variable[lookup] assign[=] call[name[dict], parameter[call[name[zip], parameter[call[name[range], parameter[call[name[len], parameter[name[labels]]]]], name[labels]]]]]
variable[dend] assign[=] call[name[dendrogram], parameter[name[Z]]]
variable[d3] assign[=] call[name[dict], parameter[]]
call[name[add_node], parameter[name[T], name[d3]]]
call[name[label_tree], parameter[call[call[name[d3]][constant[children]]][constant[0]], name[lookup]]]
return[name[d3]]
|
keyword[def] identifier[make_interactive_tree] ( identifier[matrix] = keyword[None] , identifier[labels] = keyword[None] ):
literal[string]
keyword[from] identifier[scipy] . identifier[cluster] . identifier[hierarchy] keyword[import] (
identifier[dendrogram] ,
identifier[linkage] ,
identifier[to_tree]
)
identifier[d3] = keyword[None]
keyword[from] identifier[scipy] . identifier[cluster] . identifier[hierarchy] keyword[import] identifier[cophenet]
keyword[from] identifier[scipy] . identifier[spatial] . identifier[distance] keyword[import] identifier[pdist]
keyword[if] identifier[isinstance] ( identifier[matrix] , identifier[pandas] . identifier[DataFrame] ):
identifier[Z] = identifier[linkage] ( identifier[matrix] , literal[string] )
identifier[T] = identifier[to_tree] ( identifier[Z] , identifier[rd] = keyword[False] )
keyword[if] identifier[labels] == keyword[None] :
identifier[labels] = identifier[matrix] . identifier[index] . identifier[tolist] ()
identifier[lookup] = identifier[dict] ( identifier[zip] ( identifier[range] ( identifier[len] ( identifier[labels] )), identifier[labels] ))
identifier[dend] = identifier[dendrogram] ( identifier[Z] , identifier[no_plot] = keyword[True] ,
identifier[orientation] = literal[string] ,
identifier[leaf_rotation] = literal[int] ,
identifier[leaf_font_size] = literal[int] ,
identifier[labels] = identifier[labels] )
identifier[d3] = identifier[dict] ( identifier[children] =[], identifier[name] = literal[string] )
identifier[add_node] ( identifier[T] , identifier[d3] )
identifier[label_tree] ( identifier[d3] [ literal[string] ][ literal[int] ], identifier[lookup] )
keyword[else] :
identifier[bot] . identifier[warning] ( literal[string] )
keyword[return] identifier[d3]
|
def make_interactive_tree(matrix=None, labels=None):
"""make interactive tree will return complete html for an interactive tree
:param title: a title for the plot, if not defined, will be left out.
"""
from scipy.cluster.hierarchy import dendrogram, linkage, to_tree
d3 = None
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
if isinstance(matrix, pandas.DataFrame):
Z = linkage(matrix, 'ward') # clusters
T = to_tree(Z, rd=False)
if labels == None:
labels = matrix.index.tolist() # depends on [control=['if'], data=['labels']]
lookup = dict(zip(range(len(labels)), labels))
# Create a dendrogram object without plotting
# rotates the x axis labels
# font size for the x axis labels
dend = dendrogram(Z, no_plot=True, orientation='right', leaf_rotation=90.0, leaf_font_size=8.0, labels=labels)
d3 = dict(children=[], name='root')
add_node(T, d3)
label_tree(d3['children'][0], lookup) # depends on [control=['if'], data=[]]
else:
bot.warning('Please provide data as pandas Data Frame.')
return d3
|
def battery_percent(self):
    """Get batteries capacity percent."""
    # no batinfo support or no battery stats: nothing to report
    if not batinfo_tag or not self.bat.stat:
        return []
    # sum the capacity over every battery (a machine can have several)
    total = 0
    for battery in self.bat.stat:
        try:
            total += int(battery.capacity)
        except ValueError:
            # a battery reported a non-numeric capacity: give up
            return []
    # average percentage across batteries
    return int(total / len(self.bat.stat))
|
def function[battery_percent, parameter[self]]:
constant[Get batteries capacity percent.]
if <ast.BoolOp object at 0x7da207f03be0> begin[:]
return[list[[]]]
variable[bsum] assign[=] constant[0]
for taget[name[b]] in starred[name[self].bat.stat] begin[:]
<ast.Try object at 0x7da207f00af0>
return[call[name[int], parameter[binary_operation[name[bsum] / call[name[len], parameter[name[self].bat.stat]]]]]]
|
keyword[def] identifier[battery_percent] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[batinfo_tag] keyword[or] keyword[not] identifier[self] . identifier[bat] . identifier[stat] :
keyword[return] []
identifier[bsum] = literal[int]
keyword[for] identifier[b] keyword[in] identifier[self] . identifier[bat] . identifier[stat] :
keyword[try] :
identifier[bsum] += identifier[int] ( identifier[b] . identifier[capacity] )
keyword[except] identifier[ValueError] :
keyword[return] []
keyword[return] identifier[int] ( identifier[bsum] / identifier[len] ( identifier[self] . identifier[bat] . identifier[stat] ))
|
def battery_percent(self):
"""Get batteries capacity percent."""
if not batinfo_tag or not self.bat.stat:
return [] # depends on [control=['if'], data=[]]
# Init the bsum (sum of percent)
# and Loop over batteries (yes a computer could have more than 1 battery)
bsum = 0
for b in self.bat.stat:
try:
bsum += int(b.capacity) # depends on [control=['try'], data=[]]
except ValueError:
return [] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['b']]
# Return the global percent
return int(bsum / len(self.bat.stat))
|
def _load_file(self, filename):
    """Load a vtkMultiBlockDataSet from a file (extension ``.vtm`` or
    ``.vtmb``)

    Raises
    ------
    IOError
        If the file does not exist or its extension is not ``.vtm``
        or ``.vtmb``.
    """
    filename = os.path.abspath(os.path.expanduser(filename))
    # test if file exists
    if not os.path.isfile(filename):
        # IOError keeps this consistent with the bad-extension branch
        # below (and remains catchable by callers handling Exception)
        raise IOError('File %s does not exist' % filename)
    # Get extension
    ext = vtki.get_ext(filename)
    # Extensions: .vtm and .vtmb
    # Select reader
    if ext in ['.vtm', '.vtmb']:
        reader = vtk.vtkXMLMultiBlockDataReader()
    else:
        raise IOError('File extension must be either "vtm" or "vtmb"')
    # Load file
    reader.SetFileName(filename)
    reader.Update()
    self.ShallowCopy(reader.GetOutput())
|
def function[_load_file, parameter[self, filename]]:
constant[Load a vtkMultiBlockDataSet from a file (extension ``.vtm`` or
``.vtmb``)
]
variable[filename] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.expanduser, parameter[name[filename]]]]]
if <ast.UnaryOp object at 0x7da18dc05150> begin[:]
<ast.Raise object at 0x7da18dc06e90>
variable[ext] assign[=] call[name[vtki].get_ext, parameter[name[filename]]]
if compare[name[ext] in list[[<ast.Constant object at 0x7da20c6c48b0>, <ast.Constant object at 0x7da18f00dbd0>]]] begin[:]
variable[reader] assign[=] call[name[vtk].vtkXMLMultiBlockDataReader, parameter[]]
call[name[reader].SetFileName, parameter[name[filename]]]
call[name[reader].Update, parameter[]]
call[name[self].ShallowCopy, parameter[call[name[reader].GetOutput, parameter[]]]]
|
keyword[def] identifier[_load_file] ( identifier[self] , identifier[filename] ):
literal[string]
identifier[filename] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[filename] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[filename] ):
keyword[raise] identifier[Exception] ( literal[string] % identifier[filename] )
identifier[ext] = identifier[vtki] . identifier[get_ext] ( identifier[filename] )
keyword[if] identifier[ext] keyword[in] [ literal[string] , literal[string] ]:
identifier[reader] = identifier[vtk] . identifier[vtkXMLMultiBlockDataReader] ()
keyword[else] :
keyword[raise] identifier[IOError] ( literal[string] )
identifier[reader] . identifier[SetFileName] ( identifier[filename] )
identifier[reader] . identifier[Update] ()
identifier[self] . identifier[ShallowCopy] ( identifier[reader] . identifier[GetOutput] ())
|
def _load_file(self, filename):
"""Load a vtkMultiBlockDataSet from a file (extension ``.vtm`` or
``.vtmb``)
"""
filename = os.path.abspath(os.path.expanduser(filename))
# test if file exists
if not os.path.isfile(filename):
raise Exception('File %s does not exist' % filename) # depends on [control=['if'], data=[]]
# Get extension
ext = vtki.get_ext(filename)
# Extensions: .vtm and .vtmb
# Select reader
if ext in ['.vtm', '.vtmb']:
reader = vtk.vtkXMLMultiBlockDataReader() # depends on [control=['if'], data=[]]
else:
raise IOError('File extension must be either "vtm" or "vtmb"')
# Load file
reader.SetFileName(filename)
reader.Update()
self.ShallowCopy(reader.GetOutput())
|
def profile_validation(self, status):
    """Return run total value."""
    data = self.selected_profile.data
    # make sure both counters exist before incrementing
    data.setdefault('validation_pass_count', 0)
    data.setdefault('validation_fail_count', 0)
    counter = 'validation_pass_count' if status else 'validation_fail_count'
    data[counter] += 1
|
def function[profile_validation, parameter[self, status]]:
constant[Return run total value.]
call[name[self].selected_profile.data.setdefault, parameter[constant[validation_pass_count], constant[0]]]
call[name[self].selected_profile.data.setdefault, parameter[constant[validation_fail_count], constant[0]]]
if name[status] begin[:]
<ast.AugAssign object at 0x7da2041dabf0>
|
keyword[def] identifier[profile_validation] ( identifier[self] , identifier[status] ):
literal[string]
identifier[self] . identifier[selected_profile] . identifier[data] . identifier[setdefault] ( literal[string] , literal[int] )
identifier[self] . identifier[selected_profile] . identifier[data] . identifier[setdefault] ( literal[string] , literal[int] )
keyword[if] identifier[status] :
identifier[self] . identifier[selected_profile] . identifier[data] [ literal[string] ]+= literal[int]
keyword[else] :
identifier[self] . identifier[selected_profile] . identifier[data] [ literal[string] ]+= literal[int]
|
def profile_validation(self, status):
"""Return run total value."""
self.selected_profile.data.setdefault('validation_pass_count', 0)
self.selected_profile.data.setdefault('validation_fail_count', 0)
if status:
self.selected_profile.data['validation_pass_count'] += 1 # depends on [control=['if'], data=[]]
else:
self.selected_profile.data['validation_fail_count'] += 1
|
def set_longest_orf(self, feature_id, organism=None, sequence=None):
    """
    Automatically pick the longest ORF in a feature
    :type feature_id: str
    :param feature_id: Feature UUID
    :type organism: str
    :param organism: Organism Common Name
    :type sequence: str
    :param sequence: Sequence Name
    :rtype: dict
    :return: A standard apollo feature dictionary ({"features": [{...}]})
    """
    payload = {'features': [{'uniquename': feature_id}]}
    # attach organism/sequence context before posting
    payload = self._update_data(payload, organism, sequence)
    return self.post('setLongestOrf', payload)
|
def function[set_longest_orf, parameter[self, feature_id, organism, sequence]]:
constant[
Automatically pick the longest ORF in a feature
:type feature_id: str
:param feature_id: Feature UUID
:type organism: str
:param organism: Organism Common Name
:type sequence: str
:param sequence: Sequence Name
:rtype: dict
:return: A standard apollo feature dictionary ({"features": [{...}]})
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b254dff0>], [<ast.List object at 0x7da1b254f5e0>]]
variable[data] assign[=] call[name[self]._update_data, parameter[name[data], name[organism], name[sequence]]]
return[call[name[self].post, parameter[constant[setLongestOrf], name[data]]]]
|
keyword[def] identifier[set_longest_orf] ( identifier[self] , identifier[feature_id] , identifier[organism] = keyword[None] , identifier[sequence] = keyword[None] ):
literal[string]
identifier[data] ={
literal[string] :[
{
literal[string] : identifier[feature_id] ,
}
]
}
identifier[data] = identifier[self] . identifier[_update_data] ( identifier[data] , identifier[organism] , identifier[sequence] )
keyword[return] identifier[self] . identifier[post] ( literal[string] , identifier[data] )
|
def set_longest_orf(self, feature_id, organism=None, sequence=None):
"""
Automatically pick the longest ORF in a feature
:type feature_id: str
:param feature_id: Feature UUID
:type organism: str
:param organism: Organism Common Name
:type sequence: str
:param sequence: Sequence Name
:rtype: dict
:return: A standard apollo feature dictionary ({"features": [{...}]})
"""
data = {'features': [{'uniquename': feature_id}]}
data = self._update_data(data, organism, sequence)
return self.post('setLongestOrf', data)
|
def calc_translations_parallel(images):
    """Calculate image translations in parallel.
    Parameters
    ----------
    images : ImageCollection
        Images as instance of ImageCollection.
    Returns
    -------
    2d array, (ty, tx)
        ty and tx is translation to previous image in respectively
        x or y direction.
    """
    worker = Parallel(n_jobs=_CPUS)
    translations = worker(delayed(images.translation)(img) for img in images)
    # Parallel runs the work in separate processes, so the Image objects in
    # this process never saw the results; copy them back explicitly.
    for idx, shift in enumerate(translations):
        images[idx].translation = shift
    return np.array(translations)
|
def function[calc_translations_parallel, parameter[images]]:
constant[Calculate image translations in parallel.
Parameters
----------
images : ImageCollection
Images as instance of ImageCollection.
Returns
-------
2d array, (ty, tx)
ty and tx is translation to previous image in respectively
x or y direction.
]
variable[w] assign[=] call[name[Parallel], parameter[]]
variable[res] assign[=] call[name[w], parameter[<ast.GeneratorExp object at 0x7da1b0146fe0>]]
for taget[tuple[[<ast.Name object at 0x7da1b0169210>, <ast.Name object at 0x7da1b0169240>]]] in starred[call[name[enumerate], parameter[name[res]]]] begin[:]
call[name[images]][name[i]].translation assign[=] name[translation]
return[call[name[np].array, parameter[name[res]]]]
|
keyword[def] identifier[calc_translations_parallel] ( identifier[images] ):
literal[string]
identifier[w] = identifier[Parallel] ( identifier[n_jobs] = identifier[_CPUS] )
identifier[res] = identifier[w] ( identifier[delayed] ( identifier[images] . identifier[translation] )( identifier[img] ) keyword[for] identifier[img] keyword[in] identifier[images] )
keyword[for] identifier[i] , identifier[translation] keyword[in] identifier[enumerate] ( identifier[res] ):
identifier[images] [ identifier[i] ]. identifier[translation] = identifier[translation]
keyword[return] identifier[np] . identifier[array] ( identifier[res] )
|
def calc_translations_parallel(images):
"""Calculate image translations in parallel.
Parameters
----------
images : ImageCollection
Images as instance of ImageCollection.
Returns
-------
2d array, (ty, tx)
ty and tx is translation to previous image in respectively
x or y direction.
"""
w = Parallel(n_jobs=_CPUS)
res = w((delayed(images.translation)(img) for img in images))
# save results to Image object, as Parallel is spawning another process
for (i, translation) in enumerate(res):
images[i].translation = translation # depends on [control=['for'], data=[]]
return np.array(res)
|
def delete(self, instance, disconnect=True):
    '''
    Delete an *instance* from the instance pool and optionally *disconnect*
    it from any links it might be connected to. If the *instance* is not
    part of the metaclass, a *MetaException* is thrown.
    '''
    # Guard clause: refuse to delete an instance we do not own.
    if instance not in self.storage:
        raise DeleteException("Instance not found in the instance pool")
    self.storage.remove(instance)
    if disconnect:
        # Break every association the instance participates in.
        for link in self.links.values():
            if instance in link:
                for peer in link[instance]:
                    unrelate(instance, peer, link.rel_id, link.phrase)
|
def function[delete, parameter[self, instance, disconnect]]:
constant[
Delete an *instance* from the instance pool and optionally *disconnect*
it from any links it might be connected to. If the *instance* is not
part of the metaclass, a *MetaException* is thrown.
]
if compare[name[instance] in name[self].storage] begin[:]
call[name[self].storage.remove, parameter[name[instance]]]
if <ast.UnaryOp object at 0x7da204623b50> begin[:]
return[None]
for taget[name[link]] in starred[call[name[self].links.values, parameter[]]] begin[:]
if compare[name[instance] <ast.NotIn object at 0x7da2590d7190> name[link]] begin[:]
continue
for taget[name[other]] in starred[call[name[link]][name[instance]]] begin[:]
call[name[unrelate], parameter[name[instance], name[other], name[link].rel_id, name[link].phrase]]
|
keyword[def] identifier[delete] ( identifier[self] , identifier[instance] , identifier[disconnect] = keyword[True] ):
literal[string]
keyword[if] identifier[instance] keyword[in] identifier[self] . identifier[storage] :
identifier[self] . identifier[storage] . identifier[remove] ( identifier[instance] )
keyword[else] :
keyword[raise] identifier[DeleteException] ( literal[string] )
keyword[if] keyword[not] identifier[disconnect] :
keyword[return]
keyword[for] identifier[link] keyword[in] identifier[self] . identifier[links] . identifier[values] ():
keyword[if] identifier[instance] keyword[not] keyword[in] identifier[link] :
keyword[continue]
keyword[for] identifier[other] keyword[in] identifier[link] [ identifier[instance] ]:
identifier[unrelate] ( identifier[instance] , identifier[other] , identifier[link] . identifier[rel_id] , identifier[link] . identifier[phrase] )
|
def delete(self, instance, disconnect=True):
"""
Delete an *instance* from the instance pool and optionally *disconnect*
it from any links it might be connected to. If the *instance* is not
part of the metaclass, a *MetaException* is thrown.
"""
if instance in self.storage:
self.storage.remove(instance) # depends on [control=['if'], data=['instance']]
else:
raise DeleteException('Instance not found in the instance pool')
if not disconnect:
return # depends on [control=['if'], data=[]]
for link in self.links.values():
if instance not in link:
continue # depends on [control=['if'], data=[]]
for other in link[instance]:
unrelate(instance, other, link.rel_id, link.phrase) # depends on [control=['for'], data=['other']] # depends on [control=['for'], data=['link']]
|
def hide_dataset(dataset_id, exceptions, read, write, share, **kwargs):
    """
    Hide a particular piece of data so it can only be seen by its owner.
    Only an owner can hide (and unhide) data.
    Data with no owner cannot be hidden.
    The exceptions paramater lists the usernames of those with permission to view the data
    read, write and share indicate whether these users can read, edit and share this data.
    """
    user_id = kwargs.get('user_id')
    dataset_i = _get_dataset(dataset_id)
    # Only the dataset's creator is allowed to hide it.
    if int(user_id) != dataset_i.created_by:
        raise HydraError('Permission denied. User %s is not the owner of dataset %s'
                         % (user_id, dataset_i.name))
    dataset_i.hidden = 'Y'
    if exceptions is not None:
        # Grant each excepted user explicit ownership with the requested rights,
        # so they can still access the now-hidden dataset.
        for username in exceptions:
            user_i = _get_user(username)
            dataset_i.set_owner(user_i.id, read=read, write=write, share=share)
    db.DBSession.flush()
|
def function[hide_dataset, parameter[dataset_id, exceptions, read, write, share]]:
constant[
Hide a particular piece of data so it can only be seen by its owner.
Only an owner can hide (and unhide) data.
Data with no owner cannot be hidden.
The exceptions paramater lists the usernames of those with permission to view the data
read, write and share indicate whether these users can read, edit and share this data.
]
variable[user_id] assign[=] call[name[kwargs].get, parameter[constant[user_id]]]
variable[dataset_i] assign[=] call[name[_get_dataset], parameter[name[dataset_id]]]
if compare[name[dataset_i].created_by not_equal[!=] call[name[int], parameter[name[user_id]]]] begin[:]
<ast.Raise object at 0x7da20e957b20>
name[dataset_i].hidden assign[=] constant[Y]
if compare[name[exceptions] is_not constant[None]] begin[:]
for taget[name[username]] in starred[name[exceptions]] begin[:]
variable[user_i] assign[=] call[name[_get_user], parameter[name[username]]]
call[name[dataset_i].set_owner, parameter[name[user_i].id]]
call[name[db].DBSession.flush, parameter[]]
|
keyword[def] identifier[hide_dataset] ( identifier[dataset_id] , identifier[exceptions] , identifier[read] , identifier[write] , identifier[share] ,** identifier[kwargs] ):
literal[string]
identifier[user_id] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[dataset_i] = identifier[_get_dataset] ( identifier[dataset_id] )
keyword[if] identifier[dataset_i] . identifier[created_by] != identifier[int] ( identifier[user_id] ):
keyword[raise] identifier[HydraError] ( literal[string]
literal[string]
%( identifier[user_id] , identifier[dataset_i] . identifier[name] ))
identifier[dataset_i] . identifier[hidden] = literal[string]
keyword[if] identifier[exceptions] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[username] keyword[in] identifier[exceptions] :
identifier[user_i] = identifier[_get_user] ( identifier[username] )
identifier[dataset_i] . identifier[set_owner] ( identifier[user_i] . identifier[id] , identifier[read] = identifier[read] , identifier[write] = identifier[write] , identifier[share] = identifier[share] )
identifier[db] . identifier[DBSession] . identifier[flush] ()
|
def hide_dataset(dataset_id, exceptions, read, write, share, **kwargs):
"""
Hide a particular piece of data so it can only be seen by its owner.
Only an owner can hide (and unhide) data.
Data with no owner cannot be hidden.
The exceptions paramater lists the usernames of those with permission to view the data
read, write and share indicate whether these users can read, edit and share this data.
"""
user_id = kwargs.get('user_id')
dataset_i = _get_dataset(dataset_id)
#check that I can hide the dataset
if dataset_i.created_by != int(user_id):
raise HydraError('Permission denied. User %s is not the owner of dataset %s' % (user_id, dataset_i.name)) # depends on [control=['if'], data=[]]
dataset_i.hidden = 'Y'
if exceptions is not None:
for username in exceptions:
user_i = _get_user(username)
dataset_i.set_owner(user_i.id, read=read, write=write, share=share) # depends on [control=['for'], data=['username']] # depends on [control=['if'], data=['exceptions']]
db.DBSession.flush()
|
def status(name, maximum=None, minimum=None, absolute=False, free=False):
    '''
    Return the current disk usage stats for the named mount point
    name
        Disk mount or directory for which to check used space
    maximum
        The maximum disk utilization
    minimum
        The minimum disk utilization
    absolute
        By default, the utilization is measured in percentage. Set
        the `absolute` flag to use kilobytes.
        .. versionadded:: 2016.11.0
    free
        By default, `minimum` & `maximum` refer to the amount of used space.
        Set to `True` to evaluate the free space instead.
    '''
    # Monitoring state: no changes are ever made, so no test interface needed.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {},
        'data': {},  # Data field for monitoring state
    }

    def _check(label, value):
        # Percentage bounds by default; raw kilobyte values when absolute=True.
        if absolute:
            return _validate_int(label, value, strip='KB')
        return _validate_int(label, value, [0, 100])

    # Validate extrema, accumulating any validation messages in the comment.
    if maximum is not None:
        maximum, comment = _check('maximum', maximum)
        ret['comment'] += comment
    if minimum is not None:
        minimum, comment = _check('minimum', minimum)
        ret['comment'] += comment
    if minimum is not None and maximum is not None and minimum >= maximum:
        ret['comment'] += 'minimum must be less than maximum '
    if ret['comment']:
        # Validation failed; report without querying disk usage.
        return ret
    data = __salt__['disk.usage']()
    # Dispatch: known mount point vs. plain directory path.
    if name in data:
        return _status_mount(name, ret, minimum, maximum, absolute, free, data)
    ret['comment'] += 'Disk mount {0} not present. '.format(name)
    return _status_path(name, ret, minimum, maximum, absolute, free)
|
def function[status, parameter[name, maximum, minimum, absolute, free]]:
constant[
Return the current disk usage stats for the named mount point
name
Disk mount or directory for which to check used space
maximum
The maximum disk utilization
minimum
The minimum disk utilization
absolute
By default, the utilization is measured in percentage. Set
the `absolute` flag to use kilobytes.
.. versionadded:: 2016.11.0
free
By default, `minimum` & `maximum` refer to the amount of used space.
Set to `True` to evaluate the free space instead.
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf4c0>, <ast.Constant object at 0x7da18c4ccb80>, <ast.Constant object at 0x7da18c4ce7a0>, <ast.Constant object at 0x7da18c4ce230>, <ast.Constant object at 0x7da18c4cf910>], [<ast.Name object at 0x7da18c4cfbb0>, <ast.Constant object at 0x7da18c4ccd60>, <ast.Constant object at 0x7da18c4ce7d0>, <ast.Dict object at 0x7da18c4ce530>, <ast.Dict object at 0x7da18c4cc190>]]
if compare[name[maximum] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da18c4cd960> begin[:]
<ast.Tuple object at 0x7da18c4ce0b0> assign[=] call[name[_validate_int], parameter[constant[maximum], name[maximum], list[[<ast.Constant object at 0x7da18c4ccee0>, <ast.Constant object at 0x7da18c4ce6b0>]]]]
<ast.AugAssign object at 0x7da18c4cd180>
if compare[name[minimum] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da18c4cd0c0> begin[:]
<ast.Tuple object at 0x7da18c4cfc70> assign[=] call[name[_validate_int], parameter[constant[minimum], name[minimum], list[[<ast.Constant object at 0x7da18c4ceb60>, <ast.Constant object at 0x7da18c4cef50>]]]]
<ast.AugAssign object at 0x7da18c4cd8d0>
if <ast.BoolOp object at 0x7da18c4cc7f0> begin[:]
if compare[name[minimum] greater_or_equal[>=] name[maximum]] begin[:]
<ast.AugAssign object at 0x7da18c4ce620>
if call[name[ret]][constant[comment]] begin[:]
return[name[ret]]
variable[data] assign[=] call[call[name[__salt__]][constant[disk.usage]], parameter[]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:]
<ast.AugAssign object at 0x7da18c4cc2e0>
return[call[name[_status_path], parameter[name[name], name[ret], name[minimum], name[maximum], name[absolute], name[free]]]]
|
keyword[def] identifier[status] ( identifier[name] , identifier[maximum] = keyword[None] , identifier[minimum] = keyword[None] , identifier[absolute] = keyword[False] , identifier[free] = keyword[False] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] : keyword[False] ,
literal[string] : literal[string] ,
literal[string] :{},
literal[string] :{}}
keyword[if] identifier[maximum] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[absolute] :
identifier[maximum] , identifier[comment] = identifier[_validate_int] ( literal[string] , identifier[maximum] ,[ literal[int] , literal[int] ])
keyword[else] :
identifier[maximum] , identifier[comment] = identifier[_validate_int] ( literal[string] , identifier[maximum] , identifier[strip] = literal[string] )
identifier[ret] [ literal[string] ]+= identifier[comment]
keyword[if] identifier[minimum] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[absolute] :
identifier[minimum] , identifier[comment] = identifier[_validate_int] ( literal[string] , identifier[minimum] ,[ literal[int] , literal[int] ])
keyword[else] :
identifier[minimum] , identifier[comment] = identifier[_validate_int] ( literal[string] , identifier[minimum] , identifier[strip] = literal[string] )
identifier[ret] [ literal[string] ]+= identifier[comment]
keyword[if] identifier[minimum] keyword[is] keyword[not] keyword[None] keyword[and] identifier[maximum] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[minimum] >= identifier[maximum] :
identifier[ret] [ literal[string] ]+= literal[string]
keyword[if] identifier[ret] [ literal[string] ]:
keyword[return] identifier[ret]
identifier[data] = identifier[__salt__] [ literal[string] ]()
keyword[if] identifier[name] keyword[not] keyword[in] identifier[data] :
identifier[ret] [ literal[string] ]+=( literal[string] . identifier[format] ( identifier[name] ))
keyword[return] identifier[_status_path] ( identifier[name] , identifier[ret] , identifier[minimum] , identifier[maximum] , identifier[absolute] , identifier[free] )
keyword[else] :
keyword[return] identifier[_status_mount] ( identifier[name] , identifier[ret] , identifier[minimum] , identifier[maximum] , identifier[absolute] , identifier[free] , identifier[data] )
|
def status(name, maximum=None, minimum=None, absolute=False, free=False):
"""
Return the current disk usage stats for the named mount point
name
Disk mount or directory for which to check used space
maximum
The maximum disk utilization
minimum
The minimum disk utilization
absolute
By default, the utilization is measured in percentage. Set
the `absolute` flag to use kilobytes.
.. versionadded:: 2016.11.0
free
By default, `minimum` & `maximum` refer to the amount of used space.
Set to `True` to evaluate the free space instead.
"""
# Monitoring state, no changes will be made so no test interface needed
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}, 'data': {}} # Data field for monitoring state
# Validate extrema
if maximum is not None:
if not absolute:
(maximum, comment) = _validate_int('maximum', maximum, [0, 100]) # depends on [control=['if'], data=[]]
else:
(maximum, comment) = _validate_int('maximum', maximum, strip='KB')
ret['comment'] += comment # depends on [control=['if'], data=['maximum']]
if minimum is not None:
if not absolute:
(minimum, comment) = _validate_int('minimum', minimum, [0, 100]) # depends on [control=['if'], data=[]]
else:
(minimum, comment) = _validate_int('minimum', minimum, strip='KB')
ret['comment'] += comment # depends on [control=['if'], data=['minimum']]
if minimum is not None and maximum is not None:
if minimum >= maximum:
ret['comment'] += 'minimum must be less than maximum ' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if ret['comment']:
return ret # depends on [control=['if'], data=[]]
data = __salt__['disk.usage']()
# Validate name
if name not in data:
ret['comment'] += 'Disk mount {0} not present. '.format(name)
return _status_path(name, ret, minimum, maximum, absolute, free) # depends on [control=['if'], data=['name']]
else:
return _status_mount(name, ret, minimum, maximum, absolute, free, data)
|
def insert_psd_option_group_multi_ifo(parser):
    """
    Adds the options used to call the pycbc.psd.from_cli function to an
    optparser as an OptionGroup. This should be used if you
    want to use these options in your code.
    Parameters
    -----------
    parser : object
        OptionParser instance.
    """
    group = parser.add_argument_group(
        "Options to select the method of PSD generation",
        "The options --psd-model, --psd-file, --asd-file, "
        "and --psd-estimation are mutually exclusive.")
    add = group.add_argument

    # Mutually exclusive sources for the PSD.
    add("--psd-model", nargs="+", action=MultiDetOptionAction,
        metavar='IFO:MODEL',
        help="Get PSD from given analytical model. Choose from %s"
             % (', '.join(get_psd_model_list()),))
    add("--psd-file", nargs="+", action=MultiDetOptionAction,
        metavar='IFO:FILE', help="Get PSD using given PSD ASCII file")
    add("--asd-file", nargs="+", action=MultiDetOptionAction,
        metavar='IFO:FILE', help="Get PSD using given ASD ASCII file")
    add("--psd-estimation", nargs="+", action=MultiDetOptionAction,
        metavar='IFO:FILE',
        help="Measure PSD from the data, using given average method. "
             "Choose from mean, median or median-mean.")

    # Parameters controlling Welch-style PSD estimation from data.
    add("--psd-segment-length", type=float, nargs="+",
        action=MultiDetOptionAction, metavar='IFO:LENGTH',
        help="(Required for --psd-estimation) The segment length for PSD "
             "estimation (s)")
    add("--psd-segment-stride", type=float, nargs="+",
        action=MultiDetOptionAction, metavar='IFO:STRIDE',
        help="(Required for --psd-estimation) The separation between "
             "consecutive segments (s)")
    add("--psd-num-segments", type=int, nargs="+", default=None,
        action=MultiDetOptionAction, metavar='IFO:NUM',
        help="(Optional, used only with --psd-estimation). If given PSDs "
             "will be estimated using only this number of segments. If "
             "more data is given than needed to make this number of "
             "segments than excess data will not be used in the PSD "
             "estimate. If not enough data is given the code will fail.")
    add("--psd-inverse-length", type=float, nargs="+",
        action=MultiDetOptionAction, metavar='IFO:LENGTH',
        help="(Optional) The maximum length of the impulse response of the "
             "overwhitening filter (s)")
    add("--psd-output", nargs="+", action=MultiDetOptionAction,
        metavar='IFO:FILE', help="(Optional) Write PSD to specified file")

    # Options for the PSD-variability statistic.
    add("--psdvar-short-segment", type=float, metavar="SECONDS",
        help="Length of short segment when calculating the PSD variability.")
    add("--psdvar-long-segment", type=float, metavar="SECONDS",
        help="Length of long segment when calculating the PSD variability.")
    add("--psdvar-short-psd-duration", type=float, metavar="SECONDS",
        help="Duration of short segments for PSD estimation.")
    add("--psdvar-short-psd-stride", type=float, metavar="SECONDS",
        help="Separation between PSD estimation segments.")
    add("--psdvar-low-freq", type=float, metavar="HERTZ",
        help="Minimum frequency to consider in PSD comparison.")
    add("--psdvar-high-freq", type=float, metavar="HERTZ",
        help="Maximum frequency to consider in PSD comparison.")
    return group
|
def function[insert_psd_option_group_multi_ifo, parameter[parser]]:
constant[
Adds the options used to call the pycbc.psd.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
]
variable[psd_options] assign[=] call[name[parser].add_argument_group, parameter[constant[Options to select the method of PSD generation], constant[The options --psd-model, --psd-file, --asd-file, and --psd-estimation are mutually exclusive.]]]
call[name[psd_options].add_argument, parameter[constant[--psd-model]]]
call[name[psd_options].add_argument, parameter[constant[--psd-file]]]
call[name[psd_options].add_argument, parameter[constant[--asd-file]]]
call[name[psd_options].add_argument, parameter[constant[--psd-estimation]]]
call[name[psd_options].add_argument, parameter[constant[--psd-segment-length]]]
call[name[psd_options].add_argument, parameter[constant[--psd-segment-stride]]]
call[name[psd_options].add_argument, parameter[constant[--psd-num-segments]]]
call[name[psd_options].add_argument, parameter[constant[--psd-inverse-length]]]
call[name[psd_options].add_argument, parameter[constant[--psd-output]]]
call[name[psd_options].add_argument, parameter[constant[--psdvar-short-segment]]]
call[name[psd_options].add_argument, parameter[constant[--psdvar-long-segment]]]
call[name[psd_options].add_argument, parameter[constant[--psdvar-short-psd-duration]]]
call[name[psd_options].add_argument, parameter[constant[--psdvar-short-psd-stride]]]
call[name[psd_options].add_argument, parameter[constant[--psdvar-low-freq]]]
call[name[psd_options].add_argument, parameter[constant[--psdvar-high-freq]]]
return[name[psd_options]]
|
keyword[def] identifier[insert_psd_option_group_multi_ifo] ( identifier[parser] ):
literal[string]
identifier[psd_options] = identifier[parser] . identifier[add_argument_group] (
literal[string] ,
literal[string]
literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] ,
identifier[action] = identifier[MultiDetOptionAction] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
literal[string] %( literal[string] . identifier[join] ( identifier[get_psd_model_list] ()),))
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] ,
identifier[action] = identifier[MultiDetOptionAction] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] ,
identifier[action] = identifier[MultiDetOptionAction] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] ,
identifier[action] = identifier[MultiDetOptionAction] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[float] , identifier[nargs] = literal[string] ,
identifier[action] = identifier[MultiDetOptionAction] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[float] , identifier[nargs] = literal[string] ,
identifier[action] = identifier[MultiDetOptionAction] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[nargs] = literal[string] ,
identifier[default] = keyword[None] ,
identifier[action] = identifier[MultiDetOptionAction] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[float] , identifier[nargs] = literal[string] ,
identifier[action] = identifier[MultiDetOptionAction] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] ,
identifier[action] = identifier[MultiDetOptionAction] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[float] ,
identifier[metavar] = literal[string] , identifier[help] = literal[string]
literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[float] ,
identifier[metavar] = literal[string] , identifier[help] = literal[string]
literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[float] ,
identifier[metavar] = literal[string] , identifier[help] = literal[string]
literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[float] ,
identifier[metavar] = literal[string] , identifier[help] = literal[string]
literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[float] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[psd_options] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[float] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
keyword[return] identifier[psd_options]
|
def insert_psd_option_group_multi_ifo(parser):
"""
Adds the options used to call the pycbc.psd.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
"""
psd_options = parser.add_argument_group('Options to select the method of PSD generation', 'The options --psd-model, --psd-file, --asd-file, and --psd-estimation are mutually exclusive.')
psd_options.add_argument('--psd-model', nargs='+', action=MultiDetOptionAction, metavar='IFO:MODEL', help='Get PSD from given analytical model. Choose from %s' % (', '.join(get_psd_model_list()),))
psd_options.add_argument('--psd-file', nargs='+', action=MultiDetOptionAction, metavar='IFO:FILE', help='Get PSD using given PSD ASCII file')
psd_options.add_argument('--asd-file', nargs='+', action=MultiDetOptionAction, metavar='IFO:FILE', help='Get PSD using given ASD ASCII file')
psd_options.add_argument('--psd-estimation', nargs='+', action=MultiDetOptionAction, metavar='IFO:FILE', help='Measure PSD from the data, using given average method. Choose from mean, median or median-mean.')
psd_options.add_argument('--psd-segment-length', type=float, nargs='+', action=MultiDetOptionAction, metavar='IFO:LENGTH', help='(Required for --psd-estimation) The segment length for PSD estimation (s)')
psd_options.add_argument('--psd-segment-stride', type=float, nargs='+', action=MultiDetOptionAction, metavar='IFO:STRIDE', help='(Required for --psd-estimation) The separation between consecutive segments (s)')
psd_options.add_argument('--psd-num-segments', type=int, nargs='+', default=None, action=MultiDetOptionAction, metavar='IFO:NUM', help='(Optional, used only with --psd-estimation). If given PSDs will be estimated using only this number of segments. If more data is given than needed to make this number of segments than excess data will not be used in the PSD estimate. If not enough data is given the code will fail.')
psd_options.add_argument('--psd-inverse-length', type=float, nargs='+', action=MultiDetOptionAction, metavar='IFO:LENGTH', help='(Optional) The maximum length of the impulse response of the overwhitening filter (s)')
psd_options.add_argument('--psd-output', nargs='+', action=MultiDetOptionAction, metavar='IFO:FILE', help='(Optional) Write PSD to specified file')
# Options for PSD variation
psd_options.add_argument('--psdvar-short-segment', type=float, metavar='SECONDS', help='Length of short segment when calculating the PSD variability.')
psd_options.add_argument('--psdvar-long-segment', type=float, metavar='SECONDS', help='Length of long segment when calculating the PSD variability.')
psd_options.add_argument('--psdvar-short-psd-duration', type=float, metavar='SECONDS', help='Duration of short segments for PSD estimation.')
psd_options.add_argument('--psdvar-short-psd-stride', type=float, metavar='SECONDS', help='Separation between PSD estimation segments.')
psd_options.add_argument('--psdvar-low-freq', type=float, metavar='HERTZ', help='Minimum frequency to consider in PSD comparison.')
psd_options.add_argument('--psdvar-high-freq', type=float, metavar='HERTZ', help='Maximum frequency to consider in PSD comparison.')
return psd_options
|
def from_node(index, value):
    """
    >>> h = TimelineHistory.from_node(1, 2)
    >>> h.lines
    []
    """
    # Tolerate values that are not strings or not valid JSON: fall back to
    # an empty history rather than raising.
    try:
        parsed = json.loads(value)
    except (TypeError, ValueError):
        parsed = None
    lines = parsed if isinstance(parsed, list) else []
    return TimelineHistory(index, value, lines)
|
def function[from_node, parameter[index, value]]:
constant[
>>> h = TimelineHistory.from_node(1, 2)
>>> h.lines
[]
]
<ast.Try object at 0x7da1b216f2e0>
if <ast.UnaryOp object at 0x7da1b216f070> begin[:]
variable[lines] assign[=] list[[]]
return[call[name[TimelineHistory], parameter[name[index], name[value], name[lines]]]]
|
keyword[def] identifier[from_node] ( identifier[index] , identifier[value] ):
literal[string]
keyword[try] :
identifier[lines] = identifier[json] . identifier[loads] ( identifier[value] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
identifier[lines] = keyword[None]
keyword[if] keyword[not] identifier[isinstance] ( identifier[lines] , identifier[list] ):
identifier[lines] =[]
keyword[return] identifier[TimelineHistory] ( identifier[index] , identifier[value] , identifier[lines] )
|
def from_node(index, value):
"""
>>> h = TimelineHistory.from_node(1, 2)
>>> h.lines
[]
"""
try:
lines = json.loads(value) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
lines = None # depends on [control=['except'], data=[]]
if not isinstance(lines, list):
lines = [] # depends on [control=['if'], data=[]]
return TimelineHistory(index, value, lines)
|
def extend(self, protocol: Union[Iterable[Dict], 'Pipeline']) -> 'Pipeline':
    """Append every step of another pipeline (or protocol iterable) to this one.

    :param protocol: An iterable of dictionaries (or another Pipeline)
    :return: This pipeline for fluid query building

    Example:
    >>> p1 = Pipeline.from_functions(['enrich_protein_and_rna_origins'])
    >>> p2 = Pipeline.from_functions(['remove_pathologies'])
    >>> p1.extend(p2)
    """
    for entry in protocol:
        # Each protocol entry normalizes to a (name, args, kwargs) triple.
        step_name, step_args, step_kwargs = _get_protocol_tuple(entry)
        self.append(step_name, *step_args, **step_kwargs)
    return self
|
def function[extend, parameter[self, protocol]]:
constant[Add another pipeline to the end of the current pipeline.
:param protocol: An iterable of dictionaries (or another Pipeline)
:return: This pipeline for fluid query building
Example:
>>> p1 = Pipeline.from_functions(['enrich_protein_and_rna_origins'])
>>> p2 = Pipeline.from_functions(['remove_pathologies'])
>>> p1.extend(p2)
]
for taget[name[data]] in starred[name[protocol]] begin[:]
<ast.Tuple object at 0x7da1b0e2c550> assign[=] call[name[_get_protocol_tuple], parameter[name[data]]]
call[name[self].append, parameter[name[name], <ast.Starred object at 0x7da1b0e2ca00>]]
return[name[self]]
|
keyword[def] identifier[extend] ( identifier[self] , identifier[protocol] : identifier[Union] [ identifier[Iterable] [ identifier[Dict] ], literal[string] ])-> literal[string] :
literal[string]
keyword[for] identifier[data] keyword[in] identifier[protocol] :
identifier[name] , identifier[args] , identifier[kwargs] = identifier[_get_protocol_tuple] ( identifier[data] )
identifier[self] . identifier[append] ( identifier[name] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[self]
|
def extend(self, protocol: Union[Iterable[Dict], 'Pipeline']) -> 'Pipeline':
"""Add another pipeline to the end of the current pipeline.
:param protocol: An iterable of dictionaries (or another Pipeline)
:return: This pipeline for fluid query building
Example:
>>> p1 = Pipeline.from_functions(['enrich_protein_and_rna_origins'])
>>> p2 = Pipeline.from_functions(['remove_pathologies'])
>>> p1.extend(p2)
"""
for data in protocol:
(name, args, kwargs) = _get_protocol_tuple(data)
self.append(name, *args, **kwargs) # depends on [control=['for'], data=['data']]
return self
|
def get_sns_topic_arn(topic_name, account, region):
    """Get SNS topic ARN.

    Args:
        topic_name (str): Name of the topic to lookup. A full topic ARN is
            also accepted and returned unchanged.
        account (str): Environment, e.g. dev
        region (str): Region name, e.g. us-east-1

    Returns:
        str: ARN for requested topic name

    Raises:
        SNSTopicNotFound: If no topic with the given name exists.
    """
    # A full ARN ("arn:aws:sns:region:account:name") needs no lookup.
    if topic_name.count(':') == 5 and topic_name.startswith('arn:aws:sns:'):
        return topic_name
    session = boto3.Session(profile_name=account, region_name=region)
    sns_client = session.client('sns')
    # list_topics returns at most 100 topics per call; use the paginator so
    # topics beyond the first page are still found.
    paginator = sns_client.get_paginator('list_topics')
    for page in paginator.paginate():
        for topic in page['Topics']:
            topic_arn = topic['TopicArn']
            # The topic name is the last ':'-separated component of the ARN.
            if topic_name == topic_arn.split(':')[-1]:
                return topic_arn
    LOG.critical("No topic with name %s found.", topic_name)
    raise SNSTopicNotFound('No topic with name {0} found'.format(topic_name))
|
def function[get_sns_topic_arn, parameter[topic_name, account, region]]:
constant[Get SNS topic ARN.
Args:
topic_name (str): Name of the topic to lookup.
account (str): Environment, e.g. dev
region (str): Region name, e.g. us-east-1
Returns:
str: ARN for requested topic name
]
if <ast.BoolOp object at 0x7da207f00c70> begin[:]
return[name[topic_name]]
variable[session] assign[=] call[name[boto3].Session, parameter[]]
variable[sns_client] assign[=] call[name[session].client, parameter[constant[sns]]]
variable[topics] assign[=] call[call[name[sns_client].list_topics, parameter[]]][constant[Topics]]
variable[matched_topic] assign[=] constant[None]
for taget[name[topic]] in starred[name[topics]] begin[:]
variable[topic_arn] assign[=] call[name[topic]][constant[TopicArn]]
if compare[name[topic_name] equal[==] call[call[name[topic_arn].split, parameter[constant[:]]]][<ast.UnaryOp object at 0x7da207f01990>]] begin[:]
variable[matched_topic] assign[=] name[topic_arn]
break
return[name[matched_topic]]
|
keyword[def] identifier[get_sns_topic_arn] ( identifier[topic_name] , identifier[account] , identifier[region] ):
literal[string]
keyword[if] identifier[topic_name] . identifier[count] ( literal[string] )== literal[int] keyword[and] identifier[topic_name] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[topic_name]
identifier[session] = identifier[boto3] . identifier[Session] ( identifier[profile_name] = identifier[account] , identifier[region_name] = identifier[region] )
identifier[sns_client] = identifier[session] . identifier[client] ( literal[string] )
identifier[topics] = identifier[sns_client] . identifier[list_topics] ()[ literal[string] ]
identifier[matched_topic] = keyword[None]
keyword[for] identifier[topic] keyword[in] identifier[topics] :
identifier[topic_arn] = identifier[topic] [ literal[string] ]
keyword[if] identifier[topic_name] == identifier[topic_arn] . identifier[split] ( literal[string] )[- literal[int] ]:
identifier[matched_topic] = identifier[topic_arn]
keyword[break]
keyword[else] :
identifier[LOG] . identifier[critical] ( literal[string] , identifier[topic_name] )
keyword[raise] identifier[SNSTopicNotFound] ( literal[string] . identifier[format] ( identifier[topic_name] ))
keyword[return] identifier[matched_topic]
|
def get_sns_topic_arn(topic_name, account, region):
"""Get SNS topic ARN.
Args:
topic_name (str): Name of the topic to lookup.
account (str): Environment, e.g. dev
region (str): Region name, e.g. us-east-1
Returns:
str: ARN for requested topic name
"""
if topic_name.count(':') == 5 and topic_name.startswith('arn:aws:sns:'):
return topic_name # depends on [control=['if'], data=[]]
session = boto3.Session(profile_name=account, region_name=region)
sns_client = session.client('sns')
topics = sns_client.list_topics()['Topics']
matched_topic = None
for topic in topics:
topic_arn = topic['TopicArn']
if topic_name == topic_arn.split(':')[-1]:
matched_topic = topic_arn
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['topic']]
else:
LOG.critical('No topic with name %s found.', topic_name)
raise SNSTopicNotFound('No topic with name {0} found'.format(topic_name))
return matched_topic
|
def set_back(self, x: int, y: int, r: int, g: int, b: int) -> None:
    """Set the background color of one cell.

    Args:
        x (int): X position to change.
        y (int): Y position to change.
        r (int): Red background color, from 0 to 255.
        g (int): Green background color, from 0 to 255.
        b (int): Blue background color, from 0 to 255.
    """
    # The color planes are flat row-major arrays of width * height cells.
    idx = y * self.width + x
    self.back_r[idx], self.back_g[idx], self.back_b[idx] = r, g, b
|
def function[set_back, parameter[self, x, y, r, g, b]]:
constant[Set the background color of one cell.
Args:
x (int): X position to change.
y (int): Y position to change.
r (int): Red background color, from 0 to 255.
g (int): Green background color, from 0 to 255.
b (int): Blue background color, from 0 to 255.
]
variable[i] assign[=] binary_operation[binary_operation[name[self].width * name[y]] + name[x]]
call[name[self].back_r][name[i]] assign[=] name[r]
call[name[self].back_g][name[i]] assign[=] name[g]
call[name[self].back_b][name[i]] assign[=] name[b]
|
keyword[def] identifier[set_back] ( identifier[self] , identifier[x] : identifier[int] , identifier[y] : identifier[int] , identifier[r] : identifier[int] , identifier[g] : identifier[int] , identifier[b] : identifier[int] )-> keyword[None] :
literal[string]
identifier[i] = identifier[self] . identifier[width] * identifier[y] + identifier[x]
identifier[self] . identifier[back_r] [ identifier[i] ]= identifier[r]
identifier[self] . identifier[back_g] [ identifier[i] ]= identifier[g]
identifier[self] . identifier[back_b] [ identifier[i] ]= identifier[b]
|
def set_back(self, x: int, y: int, r: int, g: int, b: int) -> None:
"""Set the background color of one cell.
Args:
x (int): X position to change.
y (int): Y position to change.
r (int): Red background color, from 0 to 255.
g (int): Green background color, from 0 to 255.
b (int): Blue background color, from 0 to 255.
"""
i = self.width * y + x
self.back_r[i] = r
self.back_g[i] = g
self.back_b[i] = b
|
async def umount(self):
    """Unmount this partition.

    Issues the unmount through the API handler and replaces the cached
    ``_data`` with the server's updated view of the partition.
    """
    # NOTE(review): assumes self.block_device and its .node are populated;
    # the handler call is remote I/O against the backing API -- confirm.
    self._data = await self._handler.unmount(
        system_id=self.block_device.node.system_id,
        device_id=self.block_device.id, id=self.id)
|
<ast.AsyncFunctionDef object at 0x7da1b26ae650>
|
keyword[async] keyword[def] identifier[umount] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_data] = keyword[await] identifier[self] . identifier[_handler] . identifier[unmount] (
identifier[system_id] = identifier[self] . identifier[block_device] . identifier[node] . identifier[system_id] ,
identifier[device_id] = identifier[self] . identifier[block_device] . identifier[id] , identifier[id] = identifier[self] . identifier[id] )
|
async def umount(self):
"""Unmount this partition."""
self._data = await self._handler.unmount(system_id=self.block_device.node.system_id, device_id=self.block_device.id, id=self.id)
|
def training(self, is_training=True):
    '''
    Switch this layer into training mode, or into prediction/evaluation
    mode when is_training=False. Returns self for chaining.
    '''
    # Pick the matching JVM-side method, then invoke it once.
    target = self.value.training if is_training else self.value.evaluate
    callJavaFunc(target)
    return self
|
def function[training, parameter[self, is_training]]:
constant[
Set this layer in the training mode or in predition mode if is_training=False
]
if name[is_training] begin[:]
call[name[callJavaFunc], parameter[name[self].value.training]]
return[name[self]]
|
keyword[def] identifier[training] ( identifier[self] , identifier[is_training] = keyword[True] ):
literal[string]
keyword[if] identifier[is_training] :
identifier[callJavaFunc] ( identifier[self] . identifier[value] . identifier[training] )
keyword[else] :
identifier[callJavaFunc] ( identifier[self] . identifier[value] . identifier[evaluate] )
keyword[return] identifier[self]
|
def training(self, is_training=True):
"""
Set this layer in the training mode or in predition mode if is_training=False
"""
if is_training:
callJavaFunc(self.value.training) # depends on [control=['if'], data=[]]
else:
callJavaFunc(self.value.evaluate)
return self
|
def load_feather(protein_feather, length_filter_pid=None, copynum_scale=False, copynum_df=None):
    """Load a feather of amino acid counts for a protein.
    Args:
        protein_feather (str): path to feather file
        length_filter_pid (float): if set, drop strain columns whose total
            residue count is not above ``length_filter_pid`` times the K12
            reference total (e.g. 0.8 keeps strains with >80% of WT length)
        copynum_scale (bool): if counts should be multiplied by protein copy number
        copynum_df (DataFrame): DataFrame of copy numbers
    Returns:
        DataFrame: of counts with some aggregated together
    """
    # Rows are count labels (e.g. '<subseq>_aa_count_<residue>'), columns are strains.
    protein_df = pd.read_feather(protein_feather).set_index('index')
    # Combine counts for residue groups
    from ssbio.protein.sequence.properties.residues import _aa_property_dict_one, EXTENDED_AA_PROPERTY_DICT_ONE
    # Each aggregator sums the per-residue counts of a residue property group
    # over each listed subsequence prefix, producing one new row per prefix.
    aggregators = {
        'aa_count_bulk'    : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Bulky'],
                              'subseqs' : ['metal_2_5D', 'metal_3D']},
        'aa_count_carb'    : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Carbonylation susceptible'],
                              'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
        'aa_count_chrg'    : {'residues': _aa_property_dict_one['Charged'],
                              'subseqs' : ['metal_2_5D', 'metal_3D', 'csa_2_5D', 'sites_2_5D', 'acc_2D', 'acc_3D',
                                           'surface_3D']},
        'aa_count_poschrg' : {'residues': _aa_property_dict_one['Basic'],
                              'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
        'aa_count_negchrg' : {'residues': _aa_property_dict_one['Acidic'],
                              'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
        'aa_count_tmstab'  : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM stabilizing'],
                              'subseqs' : ['tm_2D', 'tm_3D']},
        'aa_count_tmunstab': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM to Thr stabilizing'],
                              'subseqs' : ['tm_2D', 'tm_3D']},
        'aa_count_dis'     : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Disorder promoting'],
                              'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',
                                           'dna_2_5D']},
        'aa_count_ord'     : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Order promoting'],
                              'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',
                                           'dna_2_5D']}}
    # Do combination counts for all types of subsequences
    for suffix, info in aggregators.items():
        agg_residues = info['residues']
        for prefix in info['subseqs']:
            # Collect only the per-residue rows that actually exist for this subsequence.
            to_add_idxes = []
            for agg_res in agg_residues:
                to_add_idx = prefix + '_aa_count_' + agg_res
                if to_add_idx in protein_df.index:
                    to_add_idxes.append(to_add_idx)
            subseq_agged_col = protein_df.loc[to_add_idxes, :].sum()  # Add each residue series
            protein_df.loc[prefix + '_' + suffix] = subseq_agged_col  # Append to df
    ## REMOVE OTHER STRAINS WITH DELETIONS (use float -- length_filter_pid=0.8 to get only strains with >80% length
    ## alternative to atlas2.calculate_residue_counts_perstrain wt_pid_cutoff param -- works a little differently just considering length
    if length_filter_pid:
        # Keep strain columns whose total length exceeds the K12 reference scaled by the cutoff.
        keep_cols = protein_df.loc['aa_count_total'][protein_df.loc['aa_count_total'] > protein_df.at['aa_count_total', 'K12'] * length_filter_pid].index
        protein_df = protein_df[keep_cols]
    # Multiply by proteomics copy number?
    if copynum_scale:
        if not isinstance(copynum_df, pd.DataFrame):
            raise ValueError('Please supply copy numbers')
        # Protein ID is the feather filename up to the '_protein' suffix.
        protein_id = op.basename(protein_feather).split('_protein')[0]
        if protein_id in copynum_df.index:
            copynum = copynum_df.at[protein_id, 'copynum']
            if copynum > 0:  # TODO: currently keeping one copy of proteins with 0, is that ok?
                protein_df = protein_df * copynum
    return protein_df
|
def function[load_feather, parameter[protein_feather, length_filter_pid, copynum_scale, copynum_df]]:
constant[Load a feather of amino acid counts for a protein.
Args:
protein_feather (str): path to feather file
copynum_scale (bool): if counts should be multiplied by protein copy number
copynum_df (DataFrame): DataFrame of copy numbers
Returns:
DataFrame: of counts with some aggregated together
]
variable[protein_df] assign[=] call[call[name[pd].read_feather, parameter[name[protein_feather]]].set_index, parameter[constant[index]]]
from relative_module[ssbio.protein.sequence.properties.residues] import module[_aa_property_dict_one], module[EXTENDED_AA_PROPERTY_DICT_ONE]
variable[aggregators] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e47040>, <ast.Constant object at 0x7da1b0e44040>, <ast.Constant object at 0x7da1b0e457b0>, <ast.Constant object at 0x7da1b0e47460>, <ast.Constant object at 0x7da1b0e45150>, <ast.Constant object at 0x7da1b0e461d0>, <ast.Constant object at 0x7da1b0e461a0>, <ast.Constant object at 0x7da1b0e47910>, <ast.Constant object at 0x7da20c991420>], [<ast.Dict object at 0x7da20c992080>, <ast.Dict object at 0x7da20c990d60>, <ast.Dict object at 0x7da20c993ee0>, <ast.Dict object at 0x7da20c992bf0>, <ast.Dict object at 0x7da20c9902e0>, <ast.Dict object at 0x7da1b26ae4d0>, <ast.Dict object at 0x7da1b26acfa0>, <ast.Dict object at 0x7da1b0e2e5f0>, <ast.Dict object at 0x7da1b0e2db40>]]
for taget[tuple[[<ast.Name object at 0x7da1b0e2c550>, <ast.Name object at 0x7da1b0e2df60>]]] in starred[call[name[aggregators].items, parameter[]]] begin[:]
variable[agg_residues] assign[=] call[name[info]][constant[residues]]
for taget[name[prefix]] in starred[call[name[info]][constant[subseqs]]] begin[:]
variable[to_add_idxes] assign[=] list[[]]
for taget[name[agg_res]] in starred[name[agg_residues]] begin[:]
variable[to_add_idx] assign[=] binary_operation[binary_operation[name[prefix] + constant[_aa_count_]] + name[agg_res]]
if compare[name[to_add_idx] in name[protein_df].index] begin[:]
call[name[to_add_idxes].append, parameter[name[to_add_idx]]]
variable[subseq_agged_col] assign[=] call[call[name[protein_df].loc][tuple[[<ast.Name object at 0x7da1b0e2c220>, <ast.Slice object at 0x7da1b0e2dc90>]]].sum, parameter[]]
call[name[protein_df].loc][binary_operation[binary_operation[name[prefix] + constant[_]] + name[suffix]]] assign[=] name[subseq_agged_col]
if name[length_filter_pid] begin[:]
variable[keep_cols] assign[=] call[call[name[protein_df].loc][constant[aa_count_total]]][compare[call[name[protein_df].loc][constant[aa_count_total]] greater[>] binary_operation[call[name[protein_df].at][tuple[[<ast.Constant object at 0x7da1b0e2e2f0>, <ast.Constant object at 0x7da1b0e2e530>]]] * name[length_filter_pid]]]].index
variable[protein_df] assign[=] call[name[protein_df]][name[keep_cols]]
if name[copynum_scale] begin[:]
if <ast.UnaryOp object at 0x7da1b0e2c8e0> begin[:]
<ast.Raise object at 0x7da1b0e2c610>
variable[protein_id] assign[=] call[call[call[name[op].basename, parameter[name[protein_feather]]].split, parameter[constant[_protein]]]][constant[0]]
if compare[name[protein_id] in name[copynum_df].index] begin[:]
variable[copynum] assign[=] call[name[copynum_df].at][tuple[[<ast.Name object at 0x7da18eb57c40>, <ast.Constant object at 0x7da18eb555a0>]]]
if compare[name[copynum] greater[>] constant[0]] begin[:]
variable[protein_df] assign[=] binary_operation[name[protein_df] * name[copynum]]
return[name[protein_df]]
|
keyword[def] identifier[load_feather] ( identifier[protein_feather] , identifier[length_filter_pid] = keyword[None] , identifier[copynum_scale] = keyword[False] , identifier[copynum_df] = keyword[None] ):
literal[string]
identifier[protein_df] = identifier[pd] . identifier[read_feather] ( identifier[protein_feather] ). identifier[set_index] ( literal[string] )
keyword[from] identifier[ssbio] . identifier[protein] . identifier[sequence] . identifier[properties] . identifier[residues] keyword[import] identifier[_aa_property_dict_one] , identifier[EXTENDED_AA_PROPERTY_DICT_ONE]
identifier[aggregators] ={
literal[string] :{ literal[string] : identifier[EXTENDED_AA_PROPERTY_DICT_ONE] [ literal[string] ],
literal[string] :[ literal[string] , literal[string] ]},
literal[string] :{ literal[string] : identifier[EXTENDED_AA_PROPERTY_DICT_ONE] [ literal[string] ],
literal[string] :[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]},
literal[string] :{ literal[string] : identifier[_aa_property_dict_one] [ literal[string] ],
literal[string] :[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] ]},
literal[string] :{ literal[string] : identifier[_aa_property_dict_one] [ literal[string] ],
literal[string] :[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]},
literal[string] :{ literal[string] : identifier[_aa_property_dict_one] [ literal[string] ],
literal[string] :[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]},
literal[string] :{ literal[string] : identifier[EXTENDED_AA_PROPERTY_DICT_ONE] [ literal[string] ],
literal[string] :[ literal[string] , literal[string] ]},
literal[string] :{ literal[string] : identifier[EXTENDED_AA_PROPERTY_DICT_ONE] [ literal[string] ],
literal[string] :[ literal[string] , literal[string] ]},
literal[string] :{ literal[string] : identifier[EXTENDED_AA_PROPERTY_DICT_ONE] [ literal[string] ],
literal[string] :[ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] ]},
literal[string] :{ literal[string] : identifier[EXTENDED_AA_PROPERTY_DICT_ONE] [ literal[string] ],
literal[string] :[ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] ]}}
keyword[for] identifier[suffix] , identifier[info] keyword[in] identifier[aggregators] . identifier[items] ():
identifier[agg_residues] = identifier[info] [ literal[string] ]
keyword[for] identifier[prefix] keyword[in] identifier[info] [ literal[string] ]:
identifier[to_add_idxes] =[]
keyword[for] identifier[agg_res] keyword[in] identifier[agg_residues] :
identifier[to_add_idx] = identifier[prefix] + literal[string] + identifier[agg_res]
keyword[if] identifier[to_add_idx] keyword[in] identifier[protein_df] . identifier[index] :
identifier[to_add_idxes] . identifier[append] ( identifier[to_add_idx] )
identifier[subseq_agged_col] = identifier[protein_df] . identifier[loc] [ identifier[to_add_idxes] ,:]. identifier[sum] ()
identifier[protein_df] . identifier[loc] [ identifier[prefix] + literal[string] + identifier[suffix] ]= identifier[subseq_agged_col]
keyword[if] identifier[length_filter_pid] :
identifier[keep_cols] = identifier[protein_df] . identifier[loc] [ literal[string] ][ identifier[protein_df] . identifier[loc] [ literal[string] ]> identifier[protein_df] . identifier[at] [ literal[string] , literal[string] ]* identifier[length_filter_pid] ]. identifier[index]
identifier[protein_df] = identifier[protein_df] [ identifier[keep_cols] ]
keyword[if] identifier[copynum_scale] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[copynum_df] , identifier[pd] . identifier[DataFrame] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[protein_id] = identifier[op] . identifier[basename] ( identifier[protein_feather] ). identifier[split] ( literal[string] )[ literal[int] ]
keyword[if] identifier[protein_id] keyword[in] identifier[copynum_df] . identifier[index] :
identifier[copynum] = identifier[copynum_df] . identifier[at] [ identifier[protein_id] , literal[string] ]
keyword[if] identifier[copynum] > literal[int] :
identifier[protein_df] = identifier[protein_df] * identifier[copynum]
keyword[return] identifier[protein_df]
|
def load_feather(protein_feather, length_filter_pid=None, copynum_scale=False, copynum_df=None):
"""Load a feather of amino acid counts for a protein.
Args:
protein_feather (str): path to feather file
copynum_scale (bool): if counts should be multiplied by protein copy number
copynum_df (DataFrame): DataFrame of copy numbers
Returns:
DataFrame: of counts with some aggregated together
"""
protein_df = pd.read_feather(protein_feather).set_index('index')
# Combine counts for residue groups
from ssbio.protein.sequence.properties.residues import _aa_property_dict_one, EXTENDED_AA_PROPERTY_DICT_ONE
aggregators = {'aa_count_bulk': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Bulky'], 'subseqs': ['metal_2_5D', 'metal_3D']}, 'aa_count_carb': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Carbonylation susceptible'], 'subseqs': ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']}, 'aa_count_chrg': {'residues': _aa_property_dict_one['Charged'], 'subseqs': ['metal_2_5D', 'metal_3D', 'csa_2_5D', 'sites_2_5D', 'acc_2D', 'acc_3D', 'surface_3D']}, 'aa_count_poschrg': {'residues': _aa_property_dict_one['Basic'], 'subseqs': ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']}, 'aa_count_negchrg': {'residues': _aa_property_dict_one['Acidic'], 'subseqs': ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']}, 'aa_count_tmstab': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM stabilizing'], 'subseqs': ['tm_2D', 'tm_3D']}, 'aa_count_tmunstab': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM to Thr stabilizing'], 'subseqs': ['tm_2D', 'tm_3D']}, 'aa_count_dis': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Disorder promoting'], 'subseqs': ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D', 'dna_2_5D']}, 'aa_count_ord': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Order promoting'], 'subseqs': ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D', 'dna_2_5D']}}
# Do combination counts for all types of subsequences
for (suffix, info) in aggregators.items():
agg_residues = info['residues']
for prefix in info['subseqs']:
to_add_idxes = []
for agg_res in agg_residues:
to_add_idx = prefix + '_aa_count_' + agg_res
if to_add_idx in protein_df.index:
to_add_idxes.append(to_add_idx) # depends on [control=['if'], data=['to_add_idx']] # depends on [control=['for'], data=['agg_res']]
subseq_agged_col = protein_df.loc[to_add_idxes, :].sum() # Add each residue series
protein_df.loc[prefix + '_' + suffix] = subseq_agged_col # Append to df # depends on [control=['for'], data=['prefix']] # depends on [control=['for'], data=[]]
## REMOVE OTHER STRAINS WITH DELETIONS (use float -- length_filter_pid=0.8 to get only strains with >80% length
## alternative to atlas2.calculate_residue_counts_perstrain wt_pid_cutoff param -- works a little differently just considering length
if length_filter_pid:
keep_cols = protein_df.loc['aa_count_total'][protein_df.loc['aa_count_total'] > protein_df.at['aa_count_total', 'K12'] * length_filter_pid].index
protein_df = protein_df[keep_cols] # depends on [control=['if'], data=[]]
# Multiply by proteomics copy number?
if copynum_scale:
if not isinstance(copynum_df, pd.DataFrame):
raise ValueError('Please supply copy numbers') # depends on [control=['if'], data=[]]
protein_id = op.basename(protein_feather).split('_protein')[0]
if protein_id in copynum_df.index:
copynum = copynum_df.at[protein_id, 'copynum']
if copynum > 0: # TODO: currently keeping one copy of proteins with 0, is that ok?
protein_df = protein_df * copynum # depends on [control=['if'], data=['copynum']] # depends on [control=['if'], data=['protein_id']] # depends on [control=['if'], data=[]]
return protein_df
|
def get_all_suppliers(self, params=None):
    """
    Get all suppliers
    This will iterate over all pages until it gets all elements.
    So if the rate limit exceeded it will throw an Exception and you will get nothing
    :param params: search params
    :return: list
    """
    # Treat a missing/empty params value as an unfiltered search.
    search_params = params or {}
    return self._iterate_through_pages(
        get_function=self.get_suppliers_per_page,
        resource=SUPPLIERS,
        params=search_params,
    )
|
def function[get_all_suppliers, parameter[self, params]]:
constant[
Get all suppliers
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list
]
if <ast.UnaryOp object at 0x7da20c6e6f50> begin[:]
variable[params] assign[=] dictionary[[], []]
return[call[name[self]._iterate_through_pages, parameter[]]]
|
keyword[def] identifier[get_all_suppliers] ( identifier[self] , identifier[params] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[params] :
identifier[params] ={}
keyword[return] identifier[self] . identifier[_iterate_through_pages] (
identifier[get_function] = identifier[self] . identifier[get_suppliers_per_page] ,
identifier[resource] = identifier[SUPPLIERS] ,
**{ literal[string] : identifier[params] }
)
|
def get_all_suppliers(self, params=None):
"""
Get all suppliers
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list
"""
if not params:
params = {} # depends on [control=['if'], data=[]]
return self._iterate_through_pages(get_function=self.get_suppliers_per_page, resource=SUPPLIERS, **{'params': params})
|
def _format_trace(trace):
"""
Convert the (stripped) stack-trace to a nice readable format. The stack
trace `trace` is a list of frame records as returned by
**inspect.stack** but without the frame objects.
Returns a string.
"""
lines = []
for fname, lineno, func, src, _ in trace:
if src:
for line in src:
lines.append(' '+line.strip()+'\n')
lines.append(' %s:%4d in %s\n' % (fname, lineno, func))
return ''.join(lines)
|
def function[_format_trace, parameter[trace]]:
constant[
Convert the (stripped) stack-trace to a nice readable format. The stack
trace `trace` is a list of frame records as returned by
**inspect.stack** but without the frame objects.
Returns a string.
]
variable[lines] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da2054a4fd0>, <ast.Name object at 0x7da2054a4280>, <ast.Name object at 0x7da2054a7e50>, <ast.Name object at 0x7da2054a72e0>, <ast.Name object at 0x7da2054a7c70>]]] in starred[name[trace]] begin[:]
if name[src] begin[:]
for taget[name[line]] in starred[name[src]] begin[:]
call[name[lines].append, parameter[binary_operation[binary_operation[constant[ ] + call[name[line].strip, parameter[]]] + constant[
]]]]
call[name[lines].append, parameter[binary_operation[constant[ %s:%4d in %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2054a5d20>, <ast.Name object at 0x7da2054a53c0>, <ast.Name object at 0x7da2054a5d50>]]]]]
return[call[constant[].join, parameter[name[lines]]]]
|
keyword[def] identifier[_format_trace] ( identifier[trace] ):
literal[string]
identifier[lines] =[]
keyword[for] identifier[fname] , identifier[lineno] , identifier[func] , identifier[src] , identifier[_] keyword[in] identifier[trace] :
keyword[if] identifier[src] :
keyword[for] identifier[line] keyword[in] identifier[src] :
identifier[lines] . identifier[append] ( literal[string] + identifier[line] . identifier[strip] ()+ literal[string] )
identifier[lines] . identifier[append] ( literal[string] %( identifier[fname] , identifier[lineno] , identifier[func] ))
keyword[return] literal[string] . identifier[join] ( identifier[lines] )
|
def _format_trace(trace):
"""
Convert the (stripped) stack-trace to a nice readable format. The stack
trace `trace` is a list of frame records as returned by
**inspect.stack** but without the frame objects.
Returns a string.
"""
lines = []
for (fname, lineno, func, src, _) in trace:
if src:
for line in src:
lines.append(' ' + line.strip() + '\n') # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]]
lines.append(' %s:%4d in %s\n' % (fname, lineno, func)) # depends on [control=['for'], data=[]]
return ''.join(lines)
|
def sys_status_send(self, onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4, force_mavlink1=False):
    '''
    Send the general system state (SYS_STATUS). The system state covers the
    system mode (LOCKED / MANUAL / GUIDED / AUTO), the NAV_MODE flight state
    (LIFTOFF, LANDING, WAYPOINTS, VECTOR), and whether the system is active
    or in an emergency; during CRITICAL and EMERGENCY states the MAV is still
    considered active but should start emergency procedures autonomously.

    onboard_control_sensors_present : Bitmask of onboard controllers/sensors present (0: not present, 1: present); indices per MAV_SYS_STATUS_SENSOR (uint32_t)
    onboard_control_sensors_enabled : Bitmask of onboard controllers/sensors enabled (0: not enabled, 1: enabled); indices per MAV_SYS_STATUS_SENSOR (uint32_t)
    onboard_control_sensors_health : Bitmask of onboard controllers/sensors operational (0: error, 1: healthy); indices per MAV_SYS_STATUS_SENSOR (uint32_t)
    load                           : Maximum usage in percent of the mainloop time, (0%: 0, 100%: 1000); should always be below 1000 (uint16_t)
    voltage_battery                : Battery voltage, in millivolts (1 = 1 millivolt) (uint16_t)
    current_battery                : Battery current, in 10*milliamperes (1 = 10 milliampere); -1: autopilot does not measure the current (int16_t)
    battery_remaining              : Remaining battery energy (0%: 0, 100%: 100); -1: autopilot estimates the remaining battery (int8_t)
    drop_rate_comm                 : Communication drops in percent, (0%: 0, 100%: 10'000), (UART, I2C, SPI, CAN); dropped packets on all links (uint16_t)
    errors_comm                    : Communication errors (UART, I2C, SPI, CAN); corrupted packets on all links (uint16_t)
    errors_count1                  : Autopilot-specific errors (uint16_t)
    errors_count2                  : Autopilot-specific errors (uint16_t)
    errors_count3                  : Autopilot-specific errors (uint16_t)
    errors_count4                  : Autopilot-specific errors (uint16_t)
    '''
    # Encode the message first, then hand it to the transport layer.
    encoded = self.sys_status_encode(
        onboard_control_sensors_present,
        onboard_control_sensors_enabled,
        onboard_control_sensors_health,
        load,
        voltage_battery,
        current_battery,
        battery_remaining,
        drop_rate_comm,
        errors_comm,
        errors_count1,
        errors_count2,
        errors_count3,
        errors_count4)
    return self.send(encoded, force_mavlink1=force_mavlink1)
|
def function[sys_status_send, parameter[self, onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4, force_mavlink1]]:
constant[
The general system state. If the system is following the MAVLink
standard, the system state is mainly defined by three
orthogonal states/modes: The system mode, which is
either LOCKED (motors shut down and locked), MANUAL
(system under RC control), GUIDED (system with
autonomous position control, position setpoint
controlled manually) or AUTO (system guided by
path/waypoint planner). The NAV_MODE defined the
current flight state: LIFTOFF (often an open-loop
maneuver), LANDING, WAYPOINTS or VECTOR. This
represents the internal navigation state machine. The
system status shows wether the system is currently
active or not and if an emergency occured. During the
CRITICAL and EMERGENCY states the MAV is still
considered to be active, but should start emergency
procedures autonomously. After a failure occured it
should first move from active to critical to allow
manual intervention and then move to emergency after a
certain timeout.
onboard_control_sensors_present : Bitmask showing which onboard controllers and sensors are present. Value of 0: not present. Value of 1: present. Indices defined by ENUM MAV_SYS_STATUS_SENSOR (uint32_t)
onboard_control_sensors_enabled : Bitmask showing which onboard controllers and sensors are enabled: Value of 0: not enabled. Value of 1: enabled. Indices defined by ENUM MAV_SYS_STATUS_SENSOR (uint32_t)
onboard_control_sensors_health : Bitmask showing which onboard controllers and sensors are operational or have an error: Value of 0: not enabled. Value of 1: enabled. Indices defined by ENUM MAV_SYS_STATUS_SENSOR (uint32_t)
load : Maximum usage in percent of the mainloop time, (0%: 0, 100%: 1000) should be always below 1000 (uint16_t)
voltage_battery : Battery voltage, in millivolts (1 = 1 millivolt) (uint16_t)
current_battery : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t)
battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot estimate the remaining battery (int8_t)
drop_rate_comm : Communication drops in percent, (0%: 0, 100%: 10'000), (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t)
errors_comm : Communication errors (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t)
errors_count1 : Autopilot-specific errors (uint16_t)
errors_count2 : Autopilot-specific errors (uint16_t)
errors_count3 : Autopilot-specific errors (uint16_t)
errors_count4 : Autopilot-specific errors (uint16_t)
]
return[call[name[self].send, parameter[call[name[self].sys_status_encode, parameter[name[onboard_control_sensors_present], name[onboard_control_sensors_enabled], name[onboard_control_sensors_health], name[load], name[voltage_battery], name[current_battery], name[battery_remaining], name[drop_rate_comm], name[errors_comm], name[errors_count1], name[errors_count2], name[errors_count3], name[errors_count4]]]]]]
|
keyword[def] identifier[sys_status_send] ( identifier[self] , identifier[onboard_control_sensors_present] , identifier[onboard_control_sensors_enabled] , identifier[onboard_control_sensors_health] , identifier[load] , identifier[voltage_battery] , identifier[current_battery] , identifier[battery_remaining] , identifier[drop_rate_comm] , identifier[errors_comm] , identifier[errors_count1] , identifier[errors_count2] , identifier[errors_count3] , identifier[errors_count4] , identifier[force_mavlink1] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[send] ( identifier[self] . identifier[sys_status_encode] ( identifier[onboard_control_sensors_present] , identifier[onboard_control_sensors_enabled] , identifier[onboard_control_sensors_health] , identifier[load] , identifier[voltage_battery] , identifier[current_battery] , identifier[battery_remaining] , identifier[drop_rate_comm] , identifier[errors_comm] , identifier[errors_count1] , identifier[errors_count2] , identifier[errors_count3] , identifier[errors_count4] ), identifier[force_mavlink1] = identifier[force_mavlink1] )
|
def sys_status_send(self, onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4, force_mavlink1=False):
    """
    Encode and send a SYS_STATUS message describing the general system
    state (sensor presence/health bitmasks, CPU load, battery and
    communication-error counters).

    onboard_control_sensors_present : bitmask of onboard controllers/sensors present; indices per MAV_SYS_STATUS_SENSOR (uint32_t)
    onboard_control_sensors_enabled : bitmask of onboard controllers/sensors enabled; indices per MAV_SYS_STATUS_SENSOR (uint32_t)
    onboard_control_sensors_health  : bitmask of onboard controllers/sensors operational (1) or in error (0); indices per MAV_SYS_STATUS_SENSOR (uint32_t)
    load              : mainloop usage in 0.1% units (0..1000), should stay below 1000 (uint16_t)
    voltage_battery   : battery voltage in millivolts (uint16_t)
    current_battery   : battery current in 10 mA units, -1 if not measured (int16_t)
    battery_remaining : remaining battery in percent (0..100), -1 if autopilot estimates it (int8_t)
    drop_rate_comm    : communication drop rate in 0.01% units (0..10000) across all links (uint16_t)
    errors_comm       : communication errors (UART, I2C, SPI, CAN) across all links (uint16_t)
    errors_count1     : autopilot-specific errors (uint16_t)
    errors_count2     : autopilot-specific errors (uint16_t)
    errors_count3     : autopilot-specific errors (uint16_t)
    errors_count4     : autopilot-specific errors (uint16_t)
    force_mavlink1    : force encoding as MAVLink protocol version 1
    """
    # Build the encoded message first, then hand it to the transport layer.
    msg = self.sys_status_encode(onboard_control_sensors_present,
                                 onboard_control_sensors_enabled,
                                 onboard_control_sensors_health,
                                 load, voltage_battery, current_battery,
                                 battery_remaining, drop_rate_comm,
                                 errors_comm, errors_count1, errors_count2,
                                 errors_count3, errors_count4)
    return self.send(msg, force_mavlink1=force_mavlink1)
|
def _generic_placeobject_parser(self, obj, version):
    """A generic parser for several PlaceObjectX.

    Reads a PlaceObject-style record from ``self._src`` into *obj*:
    first a run of one-bit presence flags, then only the fields whose
    flag is set, in stream order.  ``version == 3`` enables the extra
    flag bits and trailing fields (presumably the SWF PlaceObject3
    layout — TODO confirm against the spec).
    """
    # Bit-level reader over the underlying byte stream; each flag is 1 bit.
    bc = BitConsumer(self._src)
    obj.PlaceFlagHasClipActions = bc.u_get(1)
    obj.PlaceFlagHasClipDepth = bc.u_get(1)
    obj.PlaceFlagHasName = bc.u_get(1)
    obj.PlaceFlagHasRatio = bc.u_get(1)
    obj.PlaceFlagHasColorTransform = bc.u_get(1)
    obj.PlaceFlagHasMatrix = bc.u_get(1)
    obj.PlaceFlagHasCharacter = bc.u_get(1)
    obj.PlaceFlagMove = bc.u_get(1)
    if version == 3:
        # Version 3 carries a second byte of flags.
        obj.Reserved = bc.u_get(1)
        obj.PlaceFlagOpaqueBackground = bc.u_get(1)
        obj.PlaceFlagHasVisible = bc.u_get(1)
        obj.PlaceFlagHasImage = bc.u_get(1)
        obj.PlaceFlagHasClassName = bc.u_get(1)
        obj.PlaceFlagHasCacheAsBitmap = bc.u_get(1)
        obj.PlaceFlagHasBlendMode = bc.u_get(1)
        obj.PlaceFlagHasFilterList = bc.u_get(1)
    # Depth is always present, immediately after the flags.
    obj.Depth = unpack_ui16(self._src)
    if version == 3:
        # ClassName is read when explicitly flagged, or when an image
        # is being placed together with a character id.
        if obj.PlaceFlagHasClassName or (
                obj.PlaceFlagHasImage and obj.PlaceFlagHasCharacter):
            obj.ClassName = self._get_struct_string()
    # Optional fields, each gated by its presence flag, in stream order.
    if obj.PlaceFlagHasCharacter:
        obj.CharacterId = unpack_ui16(self._src)
    if obj.PlaceFlagHasMatrix:
        obj.Matrix = self._get_struct_matrix()
    if obj.PlaceFlagHasColorTransform:
        obj.ColorTransform = self._get_struct_cxformwithalpha()
    if obj.PlaceFlagHasRatio:
        obj.Ratio = unpack_ui16(self._src)
    if obj.PlaceFlagHasName:
        obj.Name = self._get_struct_string()
    if obj.PlaceFlagHasClipDepth:
        obj.ClipDepth = unpack_ui16(self._src)
    if version == 3:
        # Trailing version-3 fields.
        if obj.PlaceFlagHasFilterList:
            obj.SurfaceFilterList = self._get_struct_filterlist()
        if obj.PlaceFlagHasBlendMode:
            obj.BlendMode = unpack_ui8(self._src)
        if obj.PlaceFlagHasCacheAsBitmap:
            obj.BitmapCache = unpack_ui8(self._src)
        if obj.PlaceFlagHasVisible:
            # NOTE(review): BackgroundColor is read only when
            # PlaceFlagHasVisible is set — confirm against the SWF spec
            # (some descriptions key it on PlaceFlagOpaqueBackground).
            obj.Visible = unpack_ui8(self._src)
            obj.BackgroundColor = self._get_struct_rgba()
    if obj.PlaceFlagHasClipActions:
        obj.ClipActions = self._get_struct_clipactions()
|
def function[_generic_placeobject_parser, parameter[self, obj, version]]:
constant[A generic parser for several PlaceObjectX.]
variable[bc] assign[=] call[name[BitConsumer], parameter[name[self]._src]]
name[obj].PlaceFlagHasClipActions assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagHasClipDepth assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagHasName assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagHasRatio assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagHasColorTransform assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagHasMatrix assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagHasCharacter assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagMove assign[=] call[name[bc].u_get, parameter[constant[1]]]
if compare[name[version] equal[==] constant[3]] begin[:]
name[obj].Reserved assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagOpaqueBackground assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagHasVisible assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagHasImage assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagHasClassName assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagHasCacheAsBitmap assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagHasBlendMode assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].PlaceFlagHasFilterList assign[=] call[name[bc].u_get, parameter[constant[1]]]
name[obj].Depth assign[=] call[name[unpack_ui16], parameter[name[self]._src]]
if compare[name[version] equal[==] constant[3]] begin[:]
if <ast.BoolOp object at 0x7da1b2545d20> begin[:]
name[obj].ClassName assign[=] call[name[self]._get_struct_string, parameter[]]
if name[obj].PlaceFlagHasCharacter begin[:]
name[obj].CharacterId assign[=] call[name[unpack_ui16], parameter[name[self]._src]]
if name[obj].PlaceFlagHasMatrix begin[:]
name[obj].Matrix assign[=] call[name[self]._get_struct_matrix, parameter[]]
if name[obj].PlaceFlagHasColorTransform begin[:]
name[obj].ColorTransform assign[=] call[name[self]._get_struct_cxformwithalpha, parameter[]]
if name[obj].PlaceFlagHasRatio begin[:]
name[obj].Ratio assign[=] call[name[unpack_ui16], parameter[name[self]._src]]
if name[obj].PlaceFlagHasName begin[:]
name[obj].Name assign[=] call[name[self]._get_struct_string, parameter[]]
if name[obj].PlaceFlagHasClipDepth begin[:]
name[obj].ClipDepth assign[=] call[name[unpack_ui16], parameter[name[self]._src]]
if compare[name[version] equal[==] constant[3]] begin[:]
if name[obj].PlaceFlagHasFilterList begin[:]
name[obj].SurfaceFilterList assign[=] call[name[self]._get_struct_filterlist, parameter[]]
if name[obj].PlaceFlagHasBlendMode begin[:]
name[obj].BlendMode assign[=] call[name[unpack_ui8], parameter[name[self]._src]]
if name[obj].PlaceFlagHasCacheAsBitmap begin[:]
name[obj].BitmapCache assign[=] call[name[unpack_ui8], parameter[name[self]._src]]
if name[obj].PlaceFlagHasVisible begin[:]
name[obj].Visible assign[=] call[name[unpack_ui8], parameter[name[self]._src]]
name[obj].BackgroundColor assign[=] call[name[self]._get_struct_rgba, parameter[]]
if name[obj].PlaceFlagHasClipActions begin[:]
name[obj].ClipActions assign[=] call[name[self]._get_struct_clipactions, parameter[]]
|
keyword[def] identifier[_generic_placeobject_parser] ( identifier[self] , identifier[obj] , identifier[version] ):
literal[string]
identifier[bc] = identifier[BitConsumer] ( identifier[self] . identifier[_src] )
identifier[obj] . identifier[PlaceFlagHasClipActions] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagHasClipDepth] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagHasName] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagHasRatio] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagHasColorTransform] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagHasMatrix] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagHasCharacter] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagMove] = identifier[bc] . identifier[u_get] ( literal[int] )
keyword[if] identifier[version] == literal[int] :
identifier[obj] . identifier[Reserved] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagOpaqueBackground] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagHasVisible] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagHasImage] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagHasClassName] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagHasCacheAsBitmap] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagHasBlendMode] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[PlaceFlagHasFilterList] = identifier[bc] . identifier[u_get] ( literal[int] )
identifier[obj] . identifier[Depth] = identifier[unpack_ui16] ( identifier[self] . identifier[_src] )
keyword[if] identifier[version] == literal[int] :
keyword[if] identifier[obj] . identifier[PlaceFlagHasClassName] keyword[or] (
identifier[obj] . identifier[PlaceFlagHasImage] keyword[and] identifier[obj] . identifier[PlaceFlagHasCharacter] ):
identifier[obj] . identifier[ClassName] = identifier[self] . identifier[_get_struct_string] ()
keyword[if] identifier[obj] . identifier[PlaceFlagHasCharacter] :
identifier[obj] . identifier[CharacterId] = identifier[unpack_ui16] ( identifier[self] . identifier[_src] )
keyword[if] identifier[obj] . identifier[PlaceFlagHasMatrix] :
identifier[obj] . identifier[Matrix] = identifier[self] . identifier[_get_struct_matrix] ()
keyword[if] identifier[obj] . identifier[PlaceFlagHasColorTransform] :
identifier[obj] . identifier[ColorTransform] = identifier[self] . identifier[_get_struct_cxformwithalpha] ()
keyword[if] identifier[obj] . identifier[PlaceFlagHasRatio] :
identifier[obj] . identifier[Ratio] = identifier[unpack_ui16] ( identifier[self] . identifier[_src] )
keyword[if] identifier[obj] . identifier[PlaceFlagHasName] :
identifier[obj] . identifier[Name] = identifier[self] . identifier[_get_struct_string] ()
keyword[if] identifier[obj] . identifier[PlaceFlagHasClipDepth] :
identifier[obj] . identifier[ClipDepth] = identifier[unpack_ui16] ( identifier[self] . identifier[_src] )
keyword[if] identifier[version] == literal[int] :
keyword[if] identifier[obj] . identifier[PlaceFlagHasFilterList] :
identifier[obj] . identifier[SurfaceFilterList] = identifier[self] . identifier[_get_struct_filterlist] ()
keyword[if] identifier[obj] . identifier[PlaceFlagHasBlendMode] :
identifier[obj] . identifier[BlendMode] = identifier[unpack_ui8] ( identifier[self] . identifier[_src] )
keyword[if] identifier[obj] . identifier[PlaceFlagHasCacheAsBitmap] :
identifier[obj] . identifier[BitmapCache] = identifier[unpack_ui8] ( identifier[self] . identifier[_src] )
keyword[if] identifier[obj] . identifier[PlaceFlagHasVisible] :
identifier[obj] . identifier[Visible] = identifier[unpack_ui8] ( identifier[self] . identifier[_src] )
identifier[obj] . identifier[BackgroundColor] = identifier[self] . identifier[_get_struct_rgba] ()
keyword[if] identifier[obj] . identifier[PlaceFlagHasClipActions] :
identifier[obj] . identifier[ClipActions] = identifier[self] . identifier[_get_struct_clipactions] ()
|
def _generic_placeobject_parser(self, obj, version):
"""A generic parser for several PlaceObjectX."""
bc = BitConsumer(self._src)
obj.PlaceFlagHasClipActions = bc.u_get(1)
obj.PlaceFlagHasClipDepth = bc.u_get(1)
obj.PlaceFlagHasName = bc.u_get(1)
obj.PlaceFlagHasRatio = bc.u_get(1)
obj.PlaceFlagHasColorTransform = bc.u_get(1)
obj.PlaceFlagHasMatrix = bc.u_get(1)
obj.PlaceFlagHasCharacter = bc.u_get(1)
obj.PlaceFlagMove = bc.u_get(1)
if version == 3:
obj.Reserved = bc.u_get(1)
obj.PlaceFlagOpaqueBackground = bc.u_get(1)
obj.PlaceFlagHasVisible = bc.u_get(1)
obj.PlaceFlagHasImage = bc.u_get(1)
obj.PlaceFlagHasClassName = bc.u_get(1)
obj.PlaceFlagHasCacheAsBitmap = bc.u_get(1)
obj.PlaceFlagHasBlendMode = bc.u_get(1)
obj.PlaceFlagHasFilterList = bc.u_get(1) # depends on [control=['if'], data=[]]
obj.Depth = unpack_ui16(self._src)
if version == 3:
if obj.PlaceFlagHasClassName or (obj.PlaceFlagHasImage and obj.PlaceFlagHasCharacter):
obj.ClassName = self._get_struct_string() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if obj.PlaceFlagHasCharacter:
obj.CharacterId = unpack_ui16(self._src) # depends on [control=['if'], data=[]]
if obj.PlaceFlagHasMatrix:
obj.Matrix = self._get_struct_matrix() # depends on [control=['if'], data=[]]
if obj.PlaceFlagHasColorTransform:
obj.ColorTransform = self._get_struct_cxformwithalpha() # depends on [control=['if'], data=[]]
if obj.PlaceFlagHasRatio:
obj.Ratio = unpack_ui16(self._src) # depends on [control=['if'], data=[]]
if obj.PlaceFlagHasName:
obj.Name = self._get_struct_string() # depends on [control=['if'], data=[]]
if obj.PlaceFlagHasClipDepth:
obj.ClipDepth = unpack_ui16(self._src) # depends on [control=['if'], data=[]]
if version == 3:
if obj.PlaceFlagHasFilterList:
obj.SurfaceFilterList = self._get_struct_filterlist() # depends on [control=['if'], data=[]]
if obj.PlaceFlagHasBlendMode:
obj.BlendMode = unpack_ui8(self._src) # depends on [control=['if'], data=[]]
if obj.PlaceFlagHasCacheAsBitmap:
obj.BitmapCache = unpack_ui8(self._src) # depends on [control=['if'], data=[]]
if obj.PlaceFlagHasVisible:
obj.Visible = unpack_ui8(self._src)
obj.BackgroundColor = self._get_struct_rgba() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if obj.PlaceFlagHasClipActions:
obj.ClipActions = self._get_struct_clipactions() # depends on [control=['if'], data=[]]
|
def to_html(self, show_mean=None, sortable=None, colorize=True, *args,
            **kwargs):
    """Extend Pandas built in `to_html` method for rendering a DataFrame
    and use it to render a ScoreMatrix.

    ``show_mean`` and ``sortable`` default to the instance's settings
    when not given explicitly.  Extra positional/keyword arguments are
    forwarded to ``pandas.DataFrame.to_html``.
    """
    # Fall back to the per-instance defaults.
    show_mean = self.show_mean if show_mean is None else show_mean
    sortable = self.sortable if sortable is None else sortable
    table = self.copy()
    if show_mean:
        # Prepend a 'Mean' column with each model's mean score.
        table.insert(0, 'Mean', None)
        table.loc[:, 'Mean'] = ['%.3f' % self[m].mean() for m in self.models]
    markup = table.to_html(*args, **kwargs)  # Pandas method
    markup, table_id = self.annotate(table, markup, show_mean, colorize)
    if sortable:
        self.dynamify(table_id)
    return markup
|
def function[to_html, parameter[self, show_mean, sortable, colorize]]:
constant[Extend Pandas built in `to_html` method for rendering a DataFrame
and use it to render a ScoreMatrix.]
if compare[name[show_mean] is constant[None]] begin[:]
variable[show_mean] assign[=] name[self].show_mean
if compare[name[sortable] is constant[None]] begin[:]
variable[sortable] assign[=] name[self].sortable
variable[df] assign[=] call[name[self].copy, parameter[]]
if name[show_mean] begin[:]
call[name[df].insert, parameter[constant[0], constant[Mean], constant[None]]]
call[name[df].loc][tuple[[<ast.Slice object at 0x7da1b0ebd570>, <ast.Constant object at 0x7da1b0ebd900>]]] assign[=] <ast.ListComp object at 0x7da1b0ebe620>
variable[html] assign[=] call[name[df].to_html, parameter[<ast.Starred object at 0x7da1b0ebc3a0>]]
<ast.Tuple object at 0x7da1b0ebcbb0> assign[=] call[name[self].annotate, parameter[name[df], name[html], name[show_mean], name[colorize]]]
if name[sortable] begin[:]
call[name[self].dynamify, parameter[name[table_id]]]
return[name[html]]
|
keyword[def] identifier[to_html] ( identifier[self] , identifier[show_mean] = keyword[None] , identifier[sortable] = keyword[None] , identifier[colorize] = keyword[True] ,* identifier[args] ,
** identifier[kwargs] ):
literal[string]
keyword[if] identifier[show_mean] keyword[is] keyword[None] :
identifier[show_mean] = identifier[self] . identifier[show_mean]
keyword[if] identifier[sortable] keyword[is] keyword[None] :
identifier[sortable] = identifier[self] . identifier[sortable]
identifier[df] = identifier[self] . identifier[copy] ()
keyword[if] identifier[show_mean] :
identifier[df] . identifier[insert] ( literal[int] , literal[string] , keyword[None] )
identifier[df] . identifier[loc] [:, literal[string] ]=[ literal[string] % identifier[self] [ identifier[m] ]. identifier[mean] () keyword[for] identifier[m] keyword[in] identifier[self] . identifier[models] ]
identifier[html] = identifier[df] . identifier[to_html] (* identifier[args] ,** identifier[kwargs] )
identifier[html] , identifier[table_id] = identifier[self] . identifier[annotate] ( identifier[df] , identifier[html] , identifier[show_mean] , identifier[colorize] )
keyword[if] identifier[sortable] :
identifier[self] . identifier[dynamify] ( identifier[table_id] )
keyword[return] identifier[html]
|
def to_html(self, show_mean=None, sortable=None, colorize=True, *args, **kwargs):
"""Extend Pandas built in `to_html` method for rendering a DataFrame
and use it to render a ScoreMatrix."""
if show_mean is None:
show_mean = self.show_mean # depends on [control=['if'], data=['show_mean']]
if sortable is None:
sortable = self.sortable # depends on [control=['if'], data=['sortable']]
df = self.copy()
if show_mean:
df.insert(0, 'Mean', None)
df.loc[:, 'Mean'] = ['%.3f' % self[m].mean() for m in self.models] # depends on [control=['if'], data=[]]
html = df.to_html(*args, **kwargs) # Pandas method
(html, table_id) = self.annotate(df, html, show_mean, colorize)
if sortable:
self.dynamify(table_id) # depends on [control=['if'], data=[]]
return html
|
def write_squonk_datasetmetadata(outputBase, thinOutput, valueClassMappings, datasetMetaProps, fieldMetaProps):
    """This is a temp hack to write the minimal metadata that Squonk needs.
    Will need to be replaced with something that allows something more complete to be written.
    Writes a JSON document to ``outputBase + '.metadata'``.

    :param outputBase: Base name for the file to write to
    :param thinOutput: Write only new data, not structures. Result type will be BasicObject
    :param valueClassMappings: A dict that describes the Java class of the value properties (used by Squonk)
    :param datasetMetaProps: A dict with metadata properties that describe the dataset as a whole.
        The keys used for these metadata are up to the user, but common ones include source, description, created, history.
    :param fieldMetaProps: A list of dicts with the additional field metadata. Each dict has a key named fieldName whose value
        is the name of the field being described, and a key named values whose value is a map of metadata properties.
        The keys used for these metadata are up to the user, but common ones include source, description, created, history.
    """
    meta = {}
    props = {}
    # TODO add created property - how to handle date formats?
    if datasetMetaProps:
        props.update(datasetMetaProps)
    if fieldMetaProps:
        meta["fieldMetaProps"] = fieldMetaProps
    if props:
        meta["properties"] = props
    if valueClassMappings:
        meta["valueClassMappings"] = valueClassMappings
    # Thin output carries no structures, so the records are BasicObjects.
    if thinOutput:
        meta['type'] = 'org.squonk.types.BasicObject'
    else:
        meta['type'] = 'org.squonk.types.MoleculeObject'
    # Context manager guarantees the file is closed even if the dump fails
    # (the original reused the name `meta` for the file handle and never
    # closed it on error).
    with open(outputBase + '.metadata', 'w') as outfile:
        json.dump(meta, outfile)
|
def function[write_squonk_datasetmetadata, parameter[outputBase, thinOutput, valueClassMappings, datasetMetaProps, fieldMetaProps]]:
constant[This is a temp hack to write the minimal metadata that Squonk needs.
Will needs to be replaced with something that allows something more complete to be written.
:param outputBase: Base name for the file to write to
:param thinOutput: Write only new data, not structures. Result type will be BasicObject
:param valueClasses: A dict that describes the Java class of the value properties (used by Squonk)
:param datasetMetaProps: A dict with metadata properties that describe the datset as a whole.
The keys used for these metadata are up to the user, but common ones include source, description, created, history.
:param fieldMetaProps: A list of dicts with the additional field metadata. Each dict has a key named fieldName whose value
is the name of the field being described, and a key name values wholes values is a map of metadata properties.
The keys used for these metadata are up to the user, but common ones include source, description, created, history.
]
variable[meta] assign[=] dictionary[[], []]
variable[props] assign[=] dictionary[[], []]
if name[datasetMetaProps] begin[:]
call[name[props].update, parameter[name[datasetMetaProps]]]
if name[fieldMetaProps] begin[:]
call[name[meta]][constant[fieldMetaProps]] assign[=] name[fieldMetaProps]
if compare[call[name[len], parameter[name[props]]] greater[>] constant[0]] begin[:]
call[name[meta]][constant[properties]] assign[=] name[props]
if name[valueClassMappings] begin[:]
call[name[meta]][constant[valueClassMappings]] assign[=] name[valueClassMappings]
if name[thinOutput] begin[:]
call[name[meta]][constant[type]] assign[=] constant[org.squonk.types.BasicObject]
variable[s] assign[=] call[name[json].dumps, parameter[name[meta]]]
variable[meta] assign[=] call[name[open], parameter[binary_operation[name[outputBase] + constant[.metadata]], constant[w]]]
call[name[meta].write, parameter[name[s]]]
call[name[meta].close, parameter[]]
|
keyword[def] identifier[write_squonk_datasetmetadata] ( identifier[outputBase] , identifier[thinOutput] , identifier[valueClassMappings] , identifier[datasetMetaProps] , identifier[fieldMetaProps] ):
literal[string]
identifier[meta] ={}
identifier[props] ={}
keyword[if] identifier[datasetMetaProps] :
identifier[props] . identifier[update] ( identifier[datasetMetaProps] )
keyword[if] identifier[fieldMetaProps] :
identifier[meta] [ literal[string] ]= identifier[fieldMetaProps]
keyword[if] identifier[len] ( identifier[props] )> literal[int] :
identifier[meta] [ literal[string] ]= identifier[props]
keyword[if] identifier[valueClassMappings] :
identifier[meta] [ literal[string] ]= identifier[valueClassMappings]
keyword[if] identifier[thinOutput] :
identifier[meta] [ literal[string] ]= literal[string]
keyword[else] :
identifier[meta] [ literal[string] ]= literal[string]
identifier[s] = identifier[json] . identifier[dumps] ( identifier[meta] )
identifier[meta] = identifier[open] ( identifier[outputBase] + literal[string] , literal[string] )
identifier[meta] . identifier[write] ( identifier[s] )
identifier[meta] . identifier[close] ()
|
def write_squonk_datasetmetadata(outputBase, thinOutput, valueClassMappings, datasetMetaProps, fieldMetaProps):
"""This is a temp hack to write the minimal metadata that Squonk needs.
Will needs to be replaced with something that allows something more complete to be written.
:param outputBase: Base name for the file to write to
:param thinOutput: Write only new data, not structures. Result type will be BasicObject
:param valueClasses: A dict that describes the Java class of the value properties (used by Squonk)
:param datasetMetaProps: A dict with metadata properties that describe the datset as a whole.
The keys used for these metadata are up to the user, but common ones include source, description, created, history.
:param fieldMetaProps: A list of dicts with the additional field metadata. Each dict has a key named fieldName whose value
is the name of the field being described, and a key name values wholes values is a map of metadata properties.
The keys used for these metadata are up to the user, but common ones include source, description, created, history.
"""
meta = {}
props = {}
# TODO add created property - how to handle date formats?
if datasetMetaProps:
props.update(datasetMetaProps) # depends on [control=['if'], data=[]]
if fieldMetaProps:
meta['fieldMetaProps'] = fieldMetaProps # depends on [control=['if'], data=[]]
if len(props) > 0:
meta['properties'] = props # depends on [control=['if'], data=[]]
if valueClassMappings:
meta['valueClassMappings'] = valueClassMappings # depends on [control=['if'], data=[]]
if thinOutput:
meta['type'] = 'org.squonk.types.BasicObject' # depends on [control=['if'], data=[]]
else:
meta['type'] = 'org.squonk.types.MoleculeObject'
s = json.dumps(meta)
meta = open(outputBase + '.metadata', 'w')
meta.write(s)
meta.close()
|
def calculate_time_to_reset(self):
    """Calculate the seconds until the request-rate token is reset, as the
    difference between the current time (truncated to whole seconds, plus
    one) and the stored reset timestamp.  Never negative.
    """
    now_ts = datetime_utcnow().replace(microsecond=0).timestamp() + 1
    # Clamp to zero when the reset moment has already passed.
    return max(self.rate_limit_reset_ts - now_ts, 0)
|
def function[calculate_time_to_reset, parameter[self]]:
constant[Calculate the seconds to reset the token requests, by obtaining the different
between the current date and the next date when the token is fully regenerated.
]
variable[time_to_reset] assign[=] binary_operation[name[self].rate_limit_reset_ts - binary_operation[call[call[call[name[datetime_utcnow], parameter[]].replace, parameter[]].timestamp, parameter[]] + constant[1]]]
if compare[name[time_to_reset] less[<] constant[0]] begin[:]
variable[time_to_reset] assign[=] constant[0]
return[name[time_to_reset]]
|
keyword[def] identifier[calculate_time_to_reset] ( identifier[self] ):
literal[string]
identifier[time_to_reset] = identifier[self] . identifier[rate_limit_reset_ts] -( identifier[datetime_utcnow] (). identifier[replace] ( identifier[microsecond] = literal[int] ). identifier[timestamp] ()+ literal[int] )
keyword[if] identifier[time_to_reset] < literal[int] :
identifier[time_to_reset] = literal[int]
keyword[return] identifier[time_to_reset]
|
def calculate_time_to_reset(self):
"""Calculate the seconds to reset the token requests, by obtaining the different
between the current date and the next date when the token is fully regenerated.
"""
time_to_reset = self.rate_limit_reset_ts - (datetime_utcnow().replace(microsecond=0).timestamp() + 1)
if time_to_reset < 0:
time_to_reset = 0 # depends on [control=['if'], data=['time_to_reset']]
return time_to_reset
|
def allocate(self, manager=None):
    """
    This function is called once we have completed the initialization
    of the :class:`Work`. It sets the manager of each task (if not already done)
    and defines the working directories of the tasks.

    Args:
        manager: :class:`TaskManager` object or None
    """
    for idx, task in enumerate(self):
        # Assign a manager only to tasks that do not have one yet.
        # Precedence: explicit argument > work's manager > flow's manager.
        if not hasattr(task, "manager"):
            if manager is not None:
                task.set_manager(manager)
            elif hasattr(self, "manager"):
                task.set_manager(self.manager)
            else:
                task.set_manager(self.flow.manager)

        # Each task lives in <workdir>/t<index>.
        expected_workdir = os.path.join(self.workdir, "t" + str(idx))
        if hasattr(task, "workdir"):
            # A pre-set workdir must agree with the computed one.
            if task.workdir != expected_workdir:
                raise ValueError("task.workdir != task_workdir: %s, %s" % (task.workdir, expected_workdir))
        else:
            task.set_workdir(expected_workdir)
|
def function[allocate, parameter[self, manager]]:
constant[
This function is called once we have completed the initialization
of the :class:`Work`. It sets the manager of each task (if not already done)
and defines the working directories of the tasks.
Args:
manager: :class:`TaskManager` object or None
]
for taget[tuple[[<ast.Name object at 0x7da1b23453c0>, <ast.Name object at 0x7da1b1cb73d0>]]] in starred[call[name[enumerate], parameter[name[self]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1cb4700> begin[:]
if compare[name[manager] is_not constant[None]] begin[:]
call[name[task].set_manager, parameter[name[manager]]]
variable[task_workdir] assign[=] call[name[os].path.join, parameter[name[self].workdir, binary_operation[constant[t] + call[name[str], parameter[name[i]]]]]]
if <ast.UnaryOp object at 0x7da1b1cb44f0> begin[:]
call[name[task].set_workdir, parameter[name[task_workdir]]]
|
keyword[def] identifier[allocate] ( identifier[self] , identifier[manager] = keyword[None] ):
literal[string]
keyword[for] identifier[i] , identifier[task] keyword[in] identifier[enumerate] ( identifier[self] ):
keyword[if] keyword[not] identifier[hasattr] ( identifier[task] , literal[string] ):
keyword[if] identifier[manager] keyword[is] keyword[not] keyword[None] :
identifier[task] . identifier[set_manager] ( identifier[manager] )
keyword[else] :
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[task] . identifier[set_manager] ( identifier[self] . identifier[manager] )
keyword[else] :
identifier[task] . identifier[set_manager] ( identifier[self] . identifier[flow] . identifier[manager] )
identifier[task_workdir] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[workdir] , literal[string] + identifier[str] ( identifier[i] ))
keyword[if] keyword[not] identifier[hasattr] ( identifier[task] , literal[string] ):
identifier[task] . identifier[set_workdir] ( identifier[task_workdir] )
keyword[else] :
keyword[if] identifier[task] . identifier[workdir] != identifier[task_workdir] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[task] . identifier[workdir] , identifier[task_workdir] ))
|
def allocate(self, manager=None):
"""
This function is called once we have completed the initialization
of the :class:`Work`. It sets the manager of each task (if not already done)
and defines the working directories of the tasks.
Args:
manager: :class:`TaskManager` object or None
"""
for (i, task) in enumerate(self):
if not hasattr(task, 'manager'):
# Set the manager
# Use the one provided in input else the one of the work/flow.
if manager is not None:
task.set_manager(manager) # depends on [control=['if'], data=['manager']]
# Look first in work and then in the flow.
elif hasattr(self, 'manager'):
task.set_manager(self.manager) # depends on [control=['if'], data=[]]
else:
task.set_manager(self.flow.manager) # depends on [control=['if'], data=[]]
task_workdir = os.path.join(self.workdir, 't' + str(i))
if not hasattr(task, 'workdir'):
task.set_workdir(task_workdir) # depends on [control=['if'], data=[]]
elif task.workdir != task_workdir:
raise ValueError('task.workdir != task_workdir: %s, %s' % (task.workdir, task_workdir)) # depends on [control=['if'], data=['task_workdir']] # depends on [control=['for'], data=[]]
|
def convert_to_oqhazardlib(
        self, tom, simple_mesh_spacing=1.0,
        complex_mesh_spacing=2.0, area_discretisation=10.0,
        use_defaults=False):
    """
    Converts the source model to an iterator of sources of :class:
    openquake.hazardlib.source.base.BaseSeismicSource
    """
    converted = []
    for src in self.sources:
        # Pick the argument tuple matching the source type; every source
        # exposes the same create_oqhazardlib_source entry point.
        if isinstance(src, mtkAreaSource):
            args = (tom, simple_mesh_spacing, area_discretisation, use_defaults)
        elif isinstance(src, mtkPointSource):
            args = (tom, simple_mesh_spacing, use_defaults)
        elif isinstance(src, mtkSimpleFaultSource):
            args = (tom, simple_mesh_spacing, use_defaults)
        elif isinstance(src, mtkComplexFaultSource):
            args = (tom, complex_mesh_spacing, use_defaults)
        else:
            raise ValueError('Source type not recognised!')
        converted.append(src.create_oqhazardlib_source(*args))
    return converted
|
def function[convert_to_oqhazardlib, parameter[self, tom, simple_mesh_spacing, complex_mesh_spacing, area_discretisation, use_defaults]]:
constant[
Converts the source model to an iterator of sources of :class:
openquake.hazardlib.source.base.BaseSeismicSource
]
variable[oq_source_model] assign[=] list[[]]
for taget[name[source]] in starred[name[self].sources] begin[:]
if call[name[isinstance], parameter[name[source], name[mtkAreaSource]]] begin[:]
call[name[oq_source_model].append, parameter[call[name[source].create_oqhazardlib_source, parameter[name[tom], name[simple_mesh_spacing], name[area_discretisation], name[use_defaults]]]]]
return[name[oq_source_model]]
|
keyword[def] identifier[convert_to_oqhazardlib] (
identifier[self] , identifier[tom] , identifier[simple_mesh_spacing] = literal[int] ,
identifier[complex_mesh_spacing] = literal[int] , identifier[area_discretisation] = literal[int] ,
identifier[use_defaults] = keyword[False] ):
literal[string]
identifier[oq_source_model] =[]
keyword[for] identifier[source] keyword[in] identifier[self] . identifier[sources] :
keyword[if] identifier[isinstance] ( identifier[source] , identifier[mtkAreaSource] ):
identifier[oq_source_model] . identifier[append] ( identifier[source] . identifier[create_oqhazardlib_source] (
identifier[tom] ,
identifier[simple_mesh_spacing] ,
identifier[area_discretisation] ,
identifier[use_defaults] ))
keyword[elif] identifier[isinstance] ( identifier[source] , identifier[mtkPointSource] ):
identifier[oq_source_model] . identifier[append] ( identifier[source] . identifier[create_oqhazardlib_source] (
identifier[tom] ,
identifier[simple_mesh_spacing] ,
identifier[use_defaults] ))
keyword[elif] identifier[isinstance] ( identifier[source] , identifier[mtkSimpleFaultSource] ):
identifier[oq_source_model] . identifier[append] ( identifier[source] . identifier[create_oqhazardlib_source] (
identifier[tom] ,
identifier[simple_mesh_spacing] ,
identifier[use_defaults] ))
keyword[elif] identifier[isinstance] ( identifier[source] , identifier[mtkComplexFaultSource] ):
identifier[oq_source_model] . identifier[append] ( identifier[source] . identifier[create_oqhazardlib_source] (
identifier[tom] ,
identifier[complex_mesh_spacing] ,
identifier[use_defaults] ))
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[oq_source_model]
|
def convert_to_oqhazardlib(self, tom, simple_mesh_spacing=1.0, complex_mesh_spacing=2.0, area_discretisation=10.0, use_defaults=False):
"""
Converts the source model to an iterator of sources of :class:
openquake.hazardlib.source.base.BaseSeismicSource
"""
oq_source_model = []
for source in self.sources:
if isinstance(source, mtkAreaSource):
oq_source_model.append(source.create_oqhazardlib_source(tom, simple_mesh_spacing, area_discretisation, use_defaults)) # depends on [control=['if'], data=[]]
elif isinstance(source, mtkPointSource):
oq_source_model.append(source.create_oqhazardlib_source(tom, simple_mesh_spacing, use_defaults)) # depends on [control=['if'], data=[]]
elif isinstance(source, mtkSimpleFaultSource):
oq_source_model.append(source.create_oqhazardlib_source(tom, simple_mesh_spacing, use_defaults)) # depends on [control=['if'], data=[]]
elif isinstance(source, mtkComplexFaultSource):
oq_source_model.append(source.create_oqhazardlib_source(tom, complex_mesh_spacing, use_defaults)) # depends on [control=['if'], data=[]]
else:
raise ValueError('Source type not recognised!') # depends on [control=['for'], data=['source']]
return oq_source_model
|
def _lreg_bokeh(self, **kwargs):
    """
    Returns a Bokeh linear regression line
    """
    try:
        # Work on a copy so the original dataset is left untouched.
        working = self._duplicate_()
        working.timestamps(working.x)
        working.lreg("Timestamps", working.y)
        # Replace the y column with the fitted regression values.
        working.drop(working.y)
        working.df = working.df.rename(columns={'Regression': working.y})
        if "date_format" in self.chart_style:
            working.date("Date", format=self.chart_style["date_format"])
        return working.line_()
    except Exception as e:
        # Best-effort: report the failure and implicitly return None.
        self.err(e, "Can not draw linear regression chart")
|
def function[_lreg_bokeh, parameter[self]]:
constant[
Returns a Bokeh linear regression line
]
<ast.Try object at 0x7da18bc72290>
|
keyword[def] identifier[_lreg_bokeh] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[ds2] = identifier[self] . identifier[_duplicate_] ()
identifier[ds2] . identifier[timestamps] ( identifier[ds2] . identifier[x] )
identifier[ds2] . identifier[lreg] ( literal[string] , identifier[ds2] . identifier[y] )
identifier[ds2] . identifier[drop] ( identifier[ds2] . identifier[y] )
identifier[ds2] . identifier[df] = identifier[ds2] . identifier[df] . identifier[rename] ( identifier[columns] ={ literal[string] : identifier[ds2] . identifier[y] })
keyword[if] literal[string] keyword[in] identifier[self] . identifier[chart_style] :
identifier[ds2] . identifier[date] ( literal[string] , identifier[format] = identifier[self] . identifier[chart_style] [ literal[string] ])
identifier[c] = identifier[ds2] . identifier[line_] ()
keyword[return] identifier[c]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[err] ( identifier[e] , literal[string] )
|
def _lreg_bokeh(self, **kwargs):
"""
Returns a Bokeh linear regression line
"""
try:
ds2 = self._duplicate_()
ds2.timestamps(ds2.x)
ds2.lreg('Timestamps', ds2.y)
ds2.drop(ds2.y)
ds2.df = ds2.df.rename(columns={'Regression': ds2.y})
if 'date_format' in self.chart_style:
ds2.date('Date', format=self.chart_style['date_format']) # depends on [control=['if'], data=[]]
c = ds2.line_()
return c # depends on [control=['try'], data=[]]
except Exception as e:
self.err(e, 'Can not draw linear regression chart') # depends on [control=['except'], data=['e']]
|
def render_customizations(self):
    """
    Customize template for site user specified customizations.

    Reads ``self.pt.customize_conf`` and disables/enables plugins
    accordingly.  A malformed plugin entry (missing expected keys) is
    logged and skipped rather than aborting the whole customization.
    """
    disable_plugins = self.pt.customize_conf.get('disable_plugins', [])
    if not disable_plugins:
        logger.debug('No site-user specified plugins to disable')
    else:
        for plugin in disable_plugins:
            try:
                self.pt.remove_plugin(plugin['plugin_type'], plugin['plugin_name'],
                                      'disabled at user request')
            except KeyError:
                # Malformed config: required key missing in this entry
                logger.info('Invalid custom configuration found for disable_plugins')

    enable_plugins = self.pt.customize_conf.get('enable_plugins', [])
    if not enable_plugins:
        # Bug fix: removed the stray trailing double-quote that was
        # embedded in this log message ('... to enable"').
        logger.debug('No site-user specified plugins to enable')
    else:
        for plugin in enable_plugins:
            try:
                msg = 'enabled at user request'
                self.pt.add_plugin(plugin['plugin_type'], plugin['plugin_name'],
                                   plugin['plugin_args'], msg)
            except KeyError:
                # Malformed config: required key missing in this entry
                logger.info('Invalid custom configuration found for enable_plugins')
|
def function[render_customizations, parameter[self]]:
constant[
Customize template for site user specified customizations
]
variable[disable_plugins] assign[=] call[name[self].pt.customize_conf.get, parameter[constant[disable_plugins], list[[]]]]
if <ast.UnaryOp object at 0x7da1b0fd9b10> begin[:]
call[name[logger].debug, parameter[constant[No site-user specified plugins to disable]]]
variable[enable_plugins] assign[=] call[name[self].pt.customize_conf.get, parameter[constant[enable_plugins], list[[]]]]
if <ast.UnaryOp object at 0x7da1b0fda620> begin[:]
call[name[logger].debug, parameter[constant[No site-user specified plugins to enable"]]]
|
keyword[def] identifier[render_customizations] ( identifier[self] ):
literal[string]
identifier[disable_plugins] = identifier[self] . identifier[pt] . identifier[customize_conf] . identifier[get] ( literal[string] ,[])
keyword[if] keyword[not] identifier[disable_plugins] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[else] :
keyword[for] identifier[plugin] keyword[in] identifier[disable_plugins] :
keyword[try] :
identifier[self] . identifier[pt] . identifier[remove_plugin] ( identifier[plugin] [ literal[string] ], identifier[plugin] [ literal[string] ],
literal[string] )
keyword[except] identifier[KeyError] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[enable_plugins] = identifier[self] . identifier[pt] . identifier[customize_conf] . identifier[get] ( literal[string] ,[])
keyword[if] keyword[not] identifier[enable_plugins] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[else] :
keyword[for] identifier[plugin] keyword[in] identifier[enable_plugins] :
keyword[try] :
identifier[msg] = literal[string]
identifier[self] . identifier[pt] . identifier[add_plugin] ( identifier[plugin] [ literal[string] ], identifier[plugin] [ literal[string] ],
identifier[plugin] [ literal[string] ], identifier[msg] )
keyword[except] identifier[KeyError] :
identifier[logger] . identifier[info] ( literal[string] )
|
def render_customizations(self):
"""
Customize template for site user specified customizations
"""
disable_plugins = self.pt.customize_conf.get('disable_plugins', [])
if not disable_plugins:
logger.debug('No site-user specified plugins to disable') # depends on [control=['if'], data=[]]
else:
for plugin in disable_plugins:
try:
self.pt.remove_plugin(plugin['plugin_type'], plugin['plugin_name'], 'disabled at user request') # depends on [control=['try'], data=[]]
except KeyError:
# Malformed config
logger.info('Invalid custom configuration found for disable_plugins') # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['plugin']]
enable_plugins = self.pt.customize_conf.get('enable_plugins', [])
if not enable_plugins:
logger.debug('No site-user specified plugins to enable"') # depends on [control=['if'], data=[]]
else:
for plugin in enable_plugins:
try:
msg = 'enabled at user request'
self.pt.add_plugin(plugin['plugin_type'], plugin['plugin_name'], plugin['plugin_args'], msg) # depends on [control=['try'], data=[]]
except KeyError:
# Malformed config
logger.info('Invalid custom configuration found for enable_plugins') # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['plugin']]
|
def upsample_geolocation(self, dsid, info):
    """Upsample the geolocation (lon,lat) from the tiepoint grid"""
    from geotiepoints import SatelliteInterpolator

    # Tiepoint grid indices and the reduced-resolution lon/lat fields.
    reduced_cols = self.nc['nx_reduced'].values
    reduced_rows = self.nc['ny_reduced'].values
    lat_reduced = self.scale_dataset(dsid, self.nc['lat_reduced'], info)
    lon_reduced = self.scale_dataset(dsid, self.nc['lon_reduced'], info)

    # Full-resolution target grid taken from the file's y/x dimensions.
    full_rows = np.arange(self.nc['y'].shape[0])
    full_cols = np.arange(self.nc['x'].shape[0])
    interpolator = SatelliteInterpolator(
        (lon_reduced.values, lat_reduced.values),
        (reduced_rows, reduced_cols),
        (full_rows, full_cols))
    lons, lats = interpolator.interpolate()

    # Cache the upsampled coordinates, keeping the reduced fields' attrs.
    self.cache['lon'] = xr.DataArray(lons, attrs=lon_reduced.attrs, dims=['y', 'x'])
    self.cache['lat'] = xr.DataArray(lats, attrs=lat_reduced.attrs, dims=['y', 'x'])
    return
|
def function[upsample_geolocation, parameter[self, dsid, info]]:
constant[Upsample the geolocation (lon,lat) from the tiepoint grid]
from relative_module[geotiepoints] import module[SatelliteInterpolator]
variable[col_indices] assign[=] call[name[self].nc][constant[nx_reduced]].values
variable[row_indices] assign[=] call[name[self].nc][constant[ny_reduced]].values
variable[lat_reduced] assign[=] call[name[self].scale_dataset, parameter[name[dsid], call[name[self].nc][constant[lat_reduced]], name[info]]]
variable[lon_reduced] assign[=] call[name[self].scale_dataset, parameter[name[dsid], call[name[self].nc][constant[lon_reduced]], name[info]]]
variable[shape] assign[=] tuple[[<ast.Subscript object at 0x7da1b2257250>, <ast.Subscript object at 0x7da1b2254a00>]]
variable[cols_full] assign[=] call[name[np].arange, parameter[call[name[shape]][constant[1]]]]
variable[rows_full] assign[=] call[name[np].arange, parameter[call[name[shape]][constant[0]]]]
variable[satint] assign[=] call[name[SatelliteInterpolator], parameter[tuple[[<ast.Attribute object at 0x7da1b2257e80>, <ast.Attribute object at 0x7da1b2255ff0>]], tuple[[<ast.Name object at 0x7da1b2257b20>, <ast.Name object at 0x7da1b2257c10>]], tuple[[<ast.Name object at 0x7da1b2255ed0>, <ast.Name object at 0x7da1b2254400>]]]]
<ast.Tuple object at 0x7da1b2257eb0> assign[=] call[name[satint].interpolate, parameter[]]
call[name[self].cache][constant[lon]] assign[=] call[name[xr].DataArray, parameter[name[lons]]]
call[name[self].cache][constant[lat]] assign[=] call[name[xr].DataArray, parameter[name[lats]]]
return[None]
|
keyword[def] identifier[upsample_geolocation] ( identifier[self] , identifier[dsid] , identifier[info] ):
literal[string]
keyword[from] identifier[geotiepoints] keyword[import] identifier[SatelliteInterpolator]
identifier[col_indices] = identifier[self] . identifier[nc] [ literal[string] ]. identifier[values]
identifier[row_indices] = identifier[self] . identifier[nc] [ literal[string] ]. identifier[values]
identifier[lat_reduced] = identifier[self] . identifier[scale_dataset] ( identifier[dsid] , identifier[self] . identifier[nc] [ literal[string] ], identifier[info] )
identifier[lon_reduced] = identifier[self] . identifier[scale_dataset] ( identifier[dsid] , identifier[self] . identifier[nc] [ literal[string] ], identifier[info] )
identifier[shape] =( identifier[self] . identifier[nc] [ literal[string] ]. identifier[shape] [ literal[int] ], identifier[self] . identifier[nc] [ literal[string] ]. identifier[shape] [ literal[int] ])
identifier[cols_full] = identifier[np] . identifier[arange] ( identifier[shape] [ literal[int] ])
identifier[rows_full] = identifier[np] . identifier[arange] ( identifier[shape] [ literal[int] ])
identifier[satint] = identifier[SatelliteInterpolator] (( identifier[lon_reduced] . identifier[values] , identifier[lat_reduced] . identifier[values] ),
( identifier[row_indices] ,
identifier[col_indices] ),
( identifier[rows_full] , identifier[cols_full] ))
identifier[lons] , identifier[lats] = identifier[satint] . identifier[interpolate] ()
identifier[self] . identifier[cache] [ literal[string] ]= identifier[xr] . identifier[DataArray] ( identifier[lons] , identifier[attrs] = identifier[lon_reduced] . identifier[attrs] , identifier[dims] =[ literal[string] , literal[string] ])
identifier[self] . identifier[cache] [ literal[string] ]= identifier[xr] . identifier[DataArray] ( identifier[lats] , identifier[attrs] = identifier[lat_reduced] . identifier[attrs] , identifier[dims] =[ literal[string] , literal[string] ])
keyword[return]
|
def upsample_geolocation(self, dsid, info):
"""Upsample the geolocation (lon,lat) from the tiepoint grid"""
from geotiepoints import SatelliteInterpolator
# Read the fields needed:
col_indices = self.nc['nx_reduced'].values
row_indices = self.nc['ny_reduced'].values
lat_reduced = self.scale_dataset(dsid, self.nc['lat_reduced'], info)
lon_reduced = self.scale_dataset(dsid, self.nc['lon_reduced'], info)
shape = (self.nc['y'].shape[0], self.nc['x'].shape[0])
cols_full = np.arange(shape[1])
rows_full = np.arange(shape[0])
satint = SatelliteInterpolator((lon_reduced.values, lat_reduced.values), (row_indices, col_indices), (rows_full, cols_full))
(lons, lats) = satint.interpolate()
self.cache['lon'] = xr.DataArray(lons, attrs=lon_reduced.attrs, dims=['y', 'x'])
self.cache['lat'] = xr.DataArray(lats, attrs=lat_reduced.attrs, dims=['y', 'x'])
return
|
def _avgConnectedSpanForColumn1D(self, columnIndex):
"""
The range of connected synapses for column. This is used to
calculate the inhibition radius. This variation of the function only
supports a 1 dimensional column topology.
Parameters:
----------------------------
:param columnIndex: The index identifying a column in the permanence,
potential and connectivity matrices
"""
assert(self._inputDimensions.size == 1)
connected = self._connectedSynapses[columnIndex].nonzero()[0]
if connected.size == 0:
return 0
else:
return max(connected) - min(connected) + 1
|
def function[_avgConnectedSpanForColumn1D, parameter[self, columnIndex]]:
constant[
The range of connected synapses for column. This is used to
calculate the inhibition radius. This variation of the function only
supports a 1 dimensional column topology.
Parameters:
----------------------------
:param columnIndex: The index identifying a column in the permanence,
potential and connectivity matrices
]
assert[compare[name[self]._inputDimensions.size equal[==] constant[1]]]
variable[connected] assign[=] call[call[call[name[self]._connectedSynapses][name[columnIndex]].nonzero, parameter[]]][constant[0]]
if compare[name[connected].size equal[==] constant[0]] begin[:]
return[constant[0]]
|
keyword[def] identifier[_avgConnectedSpanForColumn1D] ( identifier[self] , identifier[columnIndex] ):
literal[string]
keyword[assert] ( identifier[self] . identifier[_inputDimensions] . identifier[size] == literal[int] )
identifier[connected] = identifier[self] . identifier[_connectedSynapses] [ identifier[columnIndex] ]. identifier[nonzero] ()[ literal[int] ]
keyword[if] identifier[connected] . identifier[size] == literal[int] :
keyword[return] literal[int]
keyword[else] :
keyword[return] identifier[max] ( identifier[connected] )- identifier[min] ( identifier[connected] )+ literal[int]
|
def _avgConnectedSpanForColumn1D(self, columnIndex):
"""
The range of connected synapses for column. This is used to
calculate the inhibition radius. This variation of the function only
supports a 1 dimensional column topology.
Parameters:
----------------------------
:param columnIndex: The index identifying a column in the permanence,
potential and connectivity matrices
"""
assert self._inputDimensions.size == 1
connected = self._connectedSynapses[columnIndex].nonzero()[0]
if connected.size == 0:
return 0 # depends on [control=['if'], data=[]]
else:
return max(connected) - min(connected) + 1
|
def loadtxt_str(path:PathOrStr)->np.ndarray:
    "Return `ndarray` of `str` of lines of text from `path`."
    with open(path, 'r') as handle:
        stripped = [line.strip() for line in handle]
    return np.array(stripped)
|
def function[loadtxt_str, parameter[path]]:
constant[Return `ndarray` of `str` of lines of text from `path`.]
with call[name[open], parameter[name[path], constant[r]]] begin[:]
variable[lines] assign[=] call[name[f].readlines, parameter[]]
return[call[name[np].array, parameter[<ast.ListComp object at 0x7da1b1d6e950>]]]
|
keyword[def] identifier[loadtxt_str] ( identifier[path] : identifier[PathOrStr] )-> identifier[np] . identifier[ndarray] :
literal[string]
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] : identifier[lines] = identifier[f] . identifier[readlines] ()
keyword[return] identifier[np] . identifier[array] ([ identifier[l] . identifier[strip] () keyword[for] identifier[l] keyword[in] identifier[lines] ])
|
def loadtxt_str(path: PathOrStr) -> np.ndarray:
"""Return `ndarray` of `str` of lines of text from `path`."""
with open(path, 'r') as f:
lines = f.readlines() # depends on [control=['with'], data=['f']]
return np.array([l.strip() for l in lines])
|
def incoordination_score(self, data_frame):
    """
    This method calculates the variance of the time interval in msec between taps

    :param data_frame: the data frame
    :type data_frame: pandas.DataFrame
    :return is: incoordination score
    :rtype is: float
    """
    # Successive tap-time differences; keep every other interval
    # (odd positions) before taking the variance.
    intervals = data_frame.td[1:-1].values - data_frame.td[0:-2].values
    odd_intervals = intervals[np.arange(1, len(intervals), 2)]
    inc_s = np.var(odd_intervals, dtype=np.float64) * 1000.0
    # Total recording duration, rounded up to whole seconds.
    duration = math.ceil(data_frame.td[-1])
    return inc_s, duration
|
def function[incoordination_score, parameter[self, data_frame]]:
constant[
This method calculates the variance of the time interval in msec between taps
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return is: incoordination score
:rtype is: float
]
variable[diff] assign[=] binary_operation[call[name[data_frame].td][<ast.Slice object at 0x7da1b1b7fdf0>].values - call[name[data_frame].td][<ast.Slice object at 0x7da1b1b7e590>].values]
variable[inc_s] assign[=] binary_operation[call[name[np].var, parameter[call[name[diff]][call[name[np].arange, parameter[constant[1], call[name[len], parameter[name[diff]]], constant[2]]]]]] * constant[1000.0]]
variable[duration] assign[=] call[name[math].ceil, parameter[call[name[data_frame].td][<ast.UnaryOp object at 0x7da1b1b7eef0>]]]
return[tuple[[<ast.Name object at 0x7da1b1b7e4a0>, <ast.Name object at 0x7da1b1b7de70>]]]
|
keyword[def] identifier[incoordination_score] ( identifier[self] , identifier[data_frame] ):
literal[string]
identifier[diff] = identifier[data_frame] . identifier[td] [ literal[int] :- literal[int] ]. identifier[values] - identifier[data_frame] . identifier[td] [ literal[int] :- literal[int] ]. identifier[values]
identifier[inc_s] = identifier[np] . identifier[var] ( identifier[diff] [ identifier[np] . identifier[arange] ( literal[int] , identifier[len] ( identifier[diff] ), literal[int] )], identifier[dtype] = identifier[np] . identifier[float64] )* literal[int]
identifier[duration] = identifier[math] . identifier[ceil] ( identifier[data_frame] . identifier[td] [- literal[int] ])
keyword[return] identifier[inc_s] , identifier[duration]
|
def incoordination_score(self, data_frame):
"""
This method calculates the variance of the time interval in msec between taps
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return is: incoordination score
:rtype is: float
"""
diff = data_frame.td[1:-1].values - data_frame.td[0:-2].values
inc_s = np.var(diff[np.arange(1, len(diff), 2)], dtype=np.float64) * 1000.0
duration = math.ceil(data_frame.td[-1])
return (inc_s, duration)
|
def add_and_get(self, delta: int) -> int:
    """
    Adds the given value to the current value and returns the updated value.

    Thin wrapper: delegates to ``_invoke_internal`` with
    ``get_before_update=False`` so the returned value reflects the counter
    *after* the addition has been applied.

    :raises NoDataMemberInClusterError: if the cluster does not contain any data members.
    :raises UnsupportedOperationError: if the cluster version is less than 3.10.
    :raises ConsistencyLostError: if the session guarantees have been lost.
    :param delta: (int), the value to add.
    :return: (int), the updated value.
    """
    return self._invoke_internal(pn_counter_add_codec, delta=delta, get_before_update=False)
|
def function[add_and_get, parameter[self, delta]]:
constant[
Adds the given value to the current value and returns the updated value.
:raises NoDataMemberInClusterError: if the cluster does not contain any data members.
:raises UnsupportedOperationError: if the cluster version is less than 3.10.
:raises ConsistencyLostError: if the session guarantees have been lost.
:param delta: (int), the value to add.
:return: (int), the updated value.
]
return[call[name[self]._invoke_internal, parameter[name[pn_counter_add_codec]]]]
|
keyword[def] identifier[add_and_get] ( identifier[self] , identifier[delta] ):
literal[string]
keyword[return] identifier[self] . identifier[_invoke_internal] ( identifier[pn_counter_add_codec] , identifier[delta] = identifier[delta] , identifier[get_before_update] = keyword[False] )
|
def add_and_get(self, delta):
"""
Adds the given value to the current value and returns the updated value.
:raises NoDataMemberInClusterError: if the cluster does not contain any data members.
:raises UnsupportedOperationError: if the cluster version is less than 3.10.
:raises ConsistencyLostError: if the session guarantees have been lost.
:param delta: (int), the value to add.
:return: (int), the updated value.
"""
return self._invoke_internal(pn_counter_add_codec, delta=delta, get_before_update=False)
|
def create_argparser():
    """Instantiate an `argparse.ArgumentParser`.

    Adds all basic cli options including default values.
    """
    # Defaults are kept in one dict so callers can inspect/compare them.
    defaults = {
        "daemon": False,
        "loop": False,
        "listpresets": False,
        "config": None,
        "debug": False,
        "sleeptime": 300,
        "version": False,
        "verbose_count": 0,
    }
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", dest="config",
                        default=defaults["config"], help="config file")
    parser.add_argument("--list-presets", dest="listpresets", action="store_true",
                        default=defaults["listpresets"],
                        help="list all available presets")
    parser.add_argument("-d", "--daemon", dest="daemon", action="store_true",
                        default=defaults["daemon"],
                        help="go into daemon mode (implies --loop)")
    parser.add_argument("--debug", dest="debug", action="store_true",
                        default=defaults["debug"],
                        help="increase logging level to DEBUG (DEPRECATED, please use -vvv)")
    parser.add_argument("--loop", dest="loop", action="store_true",
                        default=defaults["loop"],
                        help="loop forever (default is to update once)")
    parser.add_argument("--sleeptime", dest="sleeptime",
                        default=defaults["sleeptime"],
                        help="how long to sleep between checks in seconds")
    parser.add_argument("--version", dest="version", action="store_true",
                        default=defaults["version"],
                        help="show version and exit")
    parser.add_argument("-v", "--verbose", dest="verbose_count", action="count",
                        default=defaults["verbose_count"],
                        help="increases log verbosity for each occurrence")
    return parser, defaults
|
def function[create_argparser, parameter[]]:
constant[Instantiate an `argparse.ArgumentParser`.
Adds all basic cli options including default values.
]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
variable[arg_defaults] assign[=] dictionary[[<ast.Constant object at 0x7da1b18399c0>, <ast.Constant object at 0x7da1b183a770>, <ast.Constant object at 0x7da1b183b640>, <ast.Constant object at 0x7da1b1838640>, <ast.Constant object at 0x7da1b18382b0>, <ast.Constant object at 0x7da1b1838910>, <ast.Constant object at 0x7da1b1839b10>, <ast.Constant object at 0x7da1b1838fa0>], [<ast.Constant object at 0x7da1b183ae30>, <ast.Constant object at 0x7da1b1839bd0>, <ast.Constant object at 0x7da1b18391b0>, <ast.Constant object at 0x7da1b18398d0>, <ast.Constant object at 0x7da1b183a6b0>, <ast.Constant object at 0x7da1b183ac80>, <ast.Constant object at 0x7da1b1839ed0>, <ast.Constant object at 0x7da1b18393f0>]]
call[name[parser].add_argument, parameter[constant[-c], constant[--config]]]
call[name[parser].add_argument, parameter[constant[--list-presets]]]
call[name[parser].add_argument, parameter[constant[-d], constant[--daemon]]]
call[name[parser].add_argument, parameter[constant[--debug]]]
call[name[parser].add_argument, parameter[constant[--loop]]]
call[name[parser].add_argument, parameter[constant[--sleeptime]]]
call[name[parser].add_argument, parameter[constant[--version]]]
call[name[parser].add_argument, parameter[constant[-v], constant[--verbose]]]
return[tuple[[<ast.Name object at 0x7da1b1839e70>, <ast.Name object at 0x7da1b1839810>]]]
|
keyword[def] identifier[create_argparser] ():
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ()
identifier[arg_defaults] ={
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[None] ,
literal[string] : keyword[False] ,
literal[string] : literal[int] ,
literal[string] : keyword[False] ,
literal[string] : literal[int]
}
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[help] = literal[string] , identifier[default] = identifier[arg_defaults] [ literal[string] ])
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = literal[string] ,
identifier[action] = literal[string] , identifier[default] = identifier[arg_defaults] [ literal[string] ])
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[help] = literal[string] ,
identifier[action] = literal[string] , identifier[default] = identifier[arg_defaults] [ literal[string] ])
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = literal[string] ,
identifier[action] = literal[string] , identifier[default] = identifier[arg_defaults] [ literal[string] ])
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = literal[string] ,
identifier[action] = literal[string] , identifier[default] = identifier[arg_defaults] [ literal[string] ])
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = literal[string] ,
identifier[default] = identifier[arg_defaults] [ literal[string] ])
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = literal[string] ,
identifier[action] = literal[string] , identifier[default] = identifier[arg_defaults] [ literal[string] ])
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[action] = literal[string] , identifier[default] = identifier[arg_defaults] [ literal[string] ],
identifier[help] = literal[string] )
keyword[return] identifier[parser] , identifier[arg_defaults]
|
def create_argparser():
    """Instantiate an `argparse.ArgumentParser`.

    Adds all basic cli options including default values.

    Returns:
        tuple: ``(parser, arg_defaults)`` where ``arg_defaults`` maps each
        option's ``dest`` name to its default value.
    """
    defaults = {
        'daemon': False,
        'loop': False,
        'listpresets': False,
        'config': None,
        'debug': False,
        'sleeptime': 300,
        'version': False,
        'verbose_count': 0,
    }
    parser = argparse.ArgumentParser()
    # Generic client options, registered in the order they should appear
    # in the --help output.
    parser.add_argument('-c', '--config', dest='config',
                        help='config file', default=defaults['config'])
    parser.add_argument('--list-presets', dest='listpresets',
                        help='list all available presets',
                        action='store_true', default=defaults['listpresets'])
    parser.add_argument('-d', '--daemon', dest='daemon',
                        help='go into daemon mode (implies --loop)',
                        action='store_true', default=defaults['daemon'])
    parser.add_argument('--debug', dest='debug',
                        help='increase logging level to DEBUG (DEPRECATED, please use -vvv)',
                        action='store_true', default=defaults['debug'])
    parser.add_argument('--loop', dest='loop',
                        help='loop forever (default is to update once)',
                        action='store_true', default=defaults['loop'])
    parser.add_argument('--sleeptime', dest='sleeptime',
                        help='how long to sleep between checks in seconds',
                        default=defaults['sleeptime'])
    parser.add_argument('--version', dest='version',
                        help='show version and exit',
                        action='store_true', default=defaults['version'])
    parser.add_argument('-v', '--verbose', dest='verbose_count',
                        action='count', default=defaults['verbose_count'],
                        help='increases log verbosity for each occurrence')
    return (parser, defaults)
|
def bkg_noise(readout_noise, exposure_time, sky_brightness, pixel_scael, num_exposures=1):
    """
    computes the expected Gaussian background noise of a pixel in units of counts/second

    :param readout_noise: noise added per readout
    :param exposure_time: exposure time per exposure (in seconds)
    :param sky_brightness: counts per second per unit arcsecond square
    :param pixel_scael: size of pixel in units arcseonds
    :param num_exposures: number of exposures (with same exposure time) to be co-added
    :return: estimated Gaussian noise sqrt(variance)
    """
    total_exposure = num_exposures * exposure_time
    # Readout noise variance accumulates once per co-added exposure.
    readout_variance = num_exposures * readout_noise ** 2
    # Sky counts per second falling on one pixel of area pixel_scael ** 2.
    sky_rate = sky_brightness * pixel_scael ** 2
    # NOTE(review): the sky term enters as (total_exposure * sky_rate ** 2);
    # a pure Poisson sky-noise model would use total_exposure * sky_rate
    # instead — confirm the intended noise model before changing.
    variance = readout_variance + total_exposure * sky_rate ** 2
    return np.sqrt(variance) / total_exposure
|
def function[bkg_noise, parameter[readout_noise, exposure_time, sky_brightness, pixel_scael, num_exposures]]:
constant[
computes the expected Gaussian background noise of a pixel in units of counts/second
:param readout_noise: noise added per readout
:param exposure_time: exposure time per exposure (in seconds)
:param sky_brightness: counts per second per unit arcsecond square
:param pixel_scael: size of pixel in units arcseonds
:param num_exposures: number of exposures (with same exposure time) to be co-added
:return: estimated Gaussian noise sqrt(variance)
]
variable[exposure_time_tot] assign[=] binary_operation[name[num_exposures] * name[exposure_time]]
variable[readout_noise_tot] assign[=] binary_operation[name[num_exposures] * binary_operation[name[readout_noise] ** constant[2]]]
variable[sky_per_pixel] assign[=] binary_operation[name[sky_brightness] * binary_operation[name[pixel_scael] ** constant[2]]]
variable[sigma_bkg] assign[=] binary_operation[call[name[np].sqrt, parameter[binary_operation[name[readout_noise_tot] + binary_operation[name[exposure_time_tot] * binary_operation[name[sky_per_pixel] ** constant[2]]]]]] / name[exposure_time_tot]]
return[name[sigma_bkg]]
|
keyword[def] identifier[bkg_noise] ( identifier[readout_noise] , identifier[exposure_time] , identifier[sky_brightness] , identifier[pixel_scael] , identifier[num_exposures] = literal[int] ):
literal[string]
identifier[exposure_time_tot] = identifier[num_exposures] * identifier[exposure_time]
identifier[readout_noise_tot] = identifier[num_exposures] * identifier[readout_noise] ** literal[int]
identifier[sky_per_pixel] = identifier[sky_brightness] * identifier[pixel_scael] ** literal[int]
identifier[sigma_bkg] = identifier[np] . identifier[sqrt] ( identifier[readout_noise_tot] + identifier[exposure_time_tot] * identifier[sky_per_pixel] ** literal[int] )/ identifier[exposure_time_tot]
keyword[return] identifier[sigma_bkg]
|
def bkg_noise(readout_noise, exposure_time, sky_brightness, pixel_scael, num_exposures=1):
"""
computes the expected Gaussian background noise of a pixel in units of counts/second
:param readout_noise: noise added per readout
:param exposure_time: exposure time per exposure (in seconds)
:param sky_brightness: counts per second per unit arcsecond square
:param pixel_scael: size of pixel in units arcseonds
:param num_exposures: number of exposures (with same exposure time) to be co-added
:return: estimated Gaussian noise sqrt(variance)
"""
exposure_time_tot = num_exposures * exposure_time
readout_noise_tot = num_exposures * readout_noise ** 2
sky_per_pixel = sky_brightness * pixel_scael ** 2
sigma_bkg = np.sqrt(readout_noise_tot + exposure_time_tot * sky_per_pixel ** 2) / exposure_time_tot
return sigma_bkg
|
def phoncontent(self, cls='current', correctionhandling=CorrectionHandling.CURRENT):
    """See :meth:`AbstractElement.phoncontent`"""
    # Legacy API: cls == 'original' used to imply original-correction lookup.
    if cls == 'original':
        correctionhandling = CorrectionHandling.ORIGINAL  # backward compatibility
    # Prefer the current/new content when the handling mode allows it.
    if correctionhandling in (CorrectionHandling.CURRENT, CorrectionHandling.EITHER):
        for child in self:
            if isinstance(child, (New, Current)):
                return child.phoncontent(cls, correctionhandling)
    # Fall back to (or explicitly request) the original content.
    if correctionhandling in (CorrectionHandling.ORIGINAL, CorrectionHandling.EITHER):
        for child in self:
            if isinstance(child, Original):
                return child.phoncontent(cls, correctionhandling)
    raise NoSuchPhon
|
def function[phoncontent, parameter[self, cls, correctionhandling]]:
constant[See :meth:`AbstractElement.phoncontent`]
if compare[name[cls] equal[==] constant[original]] begin[:]
variable[correctionhandling] assign[=] name[CorrectionHandling].ORIGINAL
if compare[name[correctionhandling] in tuple[[<ast.Attribute object at 0x7da18f58dab0>, <ast.Attribute object at 0x7da18f58c7c0>]]] begin[:]
for taget[name[e]] in starred[name[self]] begin[:]
if <ast.BoolOp object at 0x7da18f58f280> begin[:]
return[call[name[e].phoncontent, parameter[name[cls], name[correctionhandling]]]]
if compare[name[correctionhandling] in tuple[[<ast.Attribute object at 0x7da18f58c670>, <ast.Attribute object at 0x7da18f58f3d0>]]] begin[:]
for taget[name[e]] in starred[name[self]] begin[:]
if call[name[isinstance], parameter[name[e], name[Original]]] begin[:]
return[call[name[e].phoncontent, parameter[name[cls], name[correctionhandling]]]]
<ast.Raise object at 0x7da2041d9de0>
|
keyword[def] identifier[phoncontent] ( identifier[self] , identifier[cls] = literal[string] , identifier[correctionhandling] = identifier[CorrectionHandling] . identifier[CURRENT] ):
literal[string]
keyword[if] identifier[cls] == literal[string] : identifier[correctionhandling] = identifier[CorrectionHandling] . identifier[ORIGINAL]
keyword[if] identifier[correctionhandling] keyword[in] ( identifier[CorrectionHandling] . identifier[CURRENT] , identifier[CorrectionHandling] . identifier[EITHER] ):
keyword[for] identifier[e] keyword[in] identifier[self] :
keyword[if] identifier[isinstance] ( identifier[e] , identifier[New] ) keyword[or] identifier[isinstance] ( identifier[e] , identifier[Current] ):
keyword[return] identifier[e] . identifier[phoncontent] ( identifier[cls] , identifier[correctionhandling] )
keyword[if] identifier[correctionhandling] keyword[in] ( identifier[CorrectionHandling] . identifier[ORIGINAL] , identifier[CorrectionHandling] . identifier[EITHER] ):
keyword[for] identifier[e] keyword[in] identifier[self] :
keyword[if] identifier[isinstance] ( identifier[e] , identifier[Original] ):
keyword[return] identifier[e] . identifier[phoncontent] ( identifier[cls] , identifier[correctionhandling] )
keyword[raise] identifier[NoSuchPhon]
|
def phoncontent(self, cls='current', correctionhandling=CorrectionHandling.CURRENT):
"""See :meth:`AbstractElement.phoncontent`"""
if cls == 'original':
correctionhandling = CorrectionHandling.ORIGINAL #backward compatibility # depends on [control=['if'], data=[]]
if correctionhandling in (CorrectionHandling.CURRENT, CorrectionHandling.EITHER):
for e in self:
if isinstance(e, New) or isinstance(e, Current):
return e.phoncontent(cls, correctionhandling) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']] # depends on [control=['if'], data=['correctionhandling']]
if correctionhandling in (CorrectionHandling.ORIGINAL, CorrectionHandling.EITHER):
for e in self:
if isinstance(e, Original):
return e.phoncontent(cls, correctionhandling) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']] # depends on [control=['if'], data=['correctionhandling']]
raise NoSuchPhon
|
def prepare_authorization_response(self, request, token, headers, body, status):
    """Place token according to response mode.

    Base classes can define a default response mode for their authorization
    response by overriding the static `default_response_mode` member.

    :param request: OAuthlib request.
    :type request: oauthlib.common.Request
    :param token:
    :param headers:
    :param body:
    :param status:
    """
    # Fill in a missing mode, and coerce invalid modes to the default.
    request.response_mode = request.response_mode or self.default_response_mode
    if request.response_mode not in ('query', 'fragment'):
        log.debug('Overriding invalid response mode %s with %s',
                  request.response_mode, self.default_response_mode)
        request.response_mode = self.default_response_mode

    token_items = token.items()
    if request.response_type == 'none':
        # A 'none' response carries no token data, only the optional state.
        state = token.get('state', None)
        token_items = [('state', state)] if state else []

    # Attach the token parameters to the redirect URI in the requested part.
    if request.response_mode in ('query', 'fragment'):
        in_fragment = request.response_mode == 'fragment'
        headers['Location'] = add_params_to_uri(
            request.redirect_uri, token_items, fragment=in_fragment)
        return headers, body, status

    raise NotImplementedError(
        'Subclasses must set a valid default_response_mode')
|
def function[prepare_authorization_response, parameter[self, request, token, headers, body, status]]:
constant[Place token according to response mode.
Base classes can define a default response mode for their authorization
response by overriding the static `default_response_mode` member.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token:
:param headers:
:param body:
:param status:
]
name[request].response_mode assign[=] <ast.BoolOp object at 0x7da20c6a93c0>
if compare[name[request].response_mode <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da20c6aad10>, <ast.Constant object at 0x7da20c6aa290>]]] begin[:]
call[name[log].debug, parameter[constant[Overriding invalid response mode %s with %s], name[request].response_mode, name[self].default_response_mode]]
name[request].response_mode assign[=] name[self].default_response_mode
variable[token_items] assign[=] call[name[token].items, parameter[]]
if compare[name[request].response_type equal[==] constant[none]] begin[:]
variable[state] assign[=] call[name[token].get, parameter[constant[state], constant[None]]]
if name[state] begin[:]
variable[token_items] assign[=] list[[<ast.Tuple object at 0x7da20c6ab520>]]
if compare[name[request].response_mode equal[==] constant[query]] begin[:]
call[name[headers]][constant[Location]] assign[=] call[name[add_params_to_uri], parameter[name[request].redirect_uri, name[token_items]]]
return[tuple[[<ast.Name object at 0x7da20c6a8790>, <ast.Name object at 0x7da20c6aa1a0>, <ast.Name object at 0x7da20c6a9960>]]]
if compare[name[request].response_mode equal[==] constant[fragment]] begin[:]
call[name[headers]][constant[Location]] assign[=] call[name[add_params_to_uri], parameter[name[request].redirect_uri, name[token_items]]]
return[tuple[[<ast.Name object at 0x7da20c6a9510>, <ast.Name object at 0x7da20c6a96c0>, <ast.Name object at 0x7da20c6a9810>]]]
<ast.Raise object at 0x7da20c6abeb0>
|
keyword[def] identifier[prepare_authorization_response] ( identifier[self] , identifier[request] , identifier[token] , identifier[headers] , identifier[body] , identifier[status] ):
literal[string]
identifier[request] . identifier[response_mode] = identifier[request] . identifier[response_mode] keyword[or] identifier[self] . identifier[default_response_mode]
keyword[if] identifier[request] . identifier[response_mode] keyword[not] keyword[in] ( literal[string] , literal[string] ):
identifier[log] . identifier[debug] ( literal[string] ,
identifier[request] . identifier[response_mode] , identifier[self] . identifier[default_response_mode] )
identifier[request] . identifier[response_mode] = identifier[self] . identifier[default_response_mode]
identifier[token_items] = identifier[token] . identifier[items] ()
keyword[if] identifier[request] . identifier[response_type] == literal[string] :
identifier[state] = identifier[token] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[state] :
identifier[token_items] =[( literal[string] , identifier[state] )]
keyword[else] :
identifier[token_items] =[]
keyword[if] identifier[request] . identifier[response_mode] == literal[string] :
identifier[headers] [ literal[string] ]= identifier[add_params_to_uri] (
identifier[request] . identifier[redirect_uri] , identifier[token_items] , identifier[fragment] = keyword[False] )
keyword[return] identifier[headers] , identifier[body] , identifier[status]
keyword[if] identifier[request] . identifier[response_mode] == literal[string] :
identifier[headers] [ literal[string] ]= identifier[add_params_to_uri] (
identifier[request] . identifier[redirect_uri] , identifier[token_items] , identifier[fragment] = keyword[True] )
keyword[return] identifier[headers] , identifier[body] , identifier[status]
keyword[raise] identifier[NotImplementedError] (
literal[string] )
|
def prepare_authorization_response(self, request, token, headers, body, status):
"""Place token according to response mode.
Base classes can define a default response mode for their authorization
response by overriding the static `default_response_mode` member.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token:
:param headers:
:param body:
:param status:
"""
request.response_mode = request.response_mode or self.default_response_mode
if request.response_mode not in ('query', 'fragment'):
log.debug('Overriding invalid response mode %s with %s', request.response_mode, self.default_response_mode)
request.response_mode = self.default_response_mode # depends on [control=['if'], data=[]]
token_items = token.items()
if request.response_type == 'none':
state = token.get('state', None)
if state:
token_items = [('state', state)] # depends on [control=['if'], data=[]]
else:
token_items = [] # depends on [control=['if'], data=[]]
if request.response_mode == 'query':
headers['Location'] = add_params_to_uri(request.redirect_uri, token_items, fragment=False)
return (headers, body, status) # depends on [control=['if'], data=[]]
if request.response_mode == 'fragment':
headers['Location'] = add_params_to_uri(request.redirect_uri, token_items, fragment=True)
return (headers, body, status) # depends on [control=['if'], data=[]]
raise NotImplementedError('Subclasses must set a valid default_response_mode')
|
def request(self, method, data=None, nid=None, nid_key='nid',
            api_type="logic", return_response=False):
    """Get data from arbitrary Piazza API endpoint `method` in network `nid`

    :type  method: str
    :param method: An internal Piazza API method name like `content.get`
        or `network.get_users`
    :type  data: dict
    :param data: Key-value data to pass to Piazza in the request
    :type  nid: str
    :param nid: This is the ID of the network to which the request
        should be made. This is optional and only to override the
        existing `network_id` entered when creating the class
    :type  nid_key: str
    :param nid_key: Name expected by Piazza for `nid` when making request.
        (Usually and by default "nid", but sometimes "id" is expected)
    :type  return_response: bool
    :param return_response: If set, returns whole :class:`requests.Response`
        object rather than just the response body
    :returns: Python object containing returned data
    """
    self._check_authenticated()
    target_nid = nid if nid else self._nid
    payload = {} if data is None else data

    headers = {}
    # Piazza expects the session cookie echoed back as a CSRF token header.
    if "session_id" in self.session.cookies:
        headers["CSRF-Token"] = self.session.cookies["session_id"]

    endpoint = self.base_api_urls[api_type]
    if api_type == "logic":
        # The "logic" endpoint takes the method name and a nonce in the URL.
        endpoint += "?method={}&aid={}".format(method, _piazza_nonce())

    params = dict({nid_key: target_nid}, **payload)
    response = self.session.post(
        endpoint,
        data=json.dumps({"method": method, "params": params}),
        headers=headers,
    )
    return response if return_response else response.json()
|
def function[request, parameter[self, method, data, nid, nid_key, api_type, return_response]]:
constant[Get data from arbitrary Piazza API endpoint `method` in network `nid`
:type method: str
:param method: An internal Piazza API method name like `content.get`
or `network.get_users`
:type data: dict
:param data: Key-value data to pass to Piazza in the request
:type nid: str
:param nid: This is the ID of the network to which the request
should be made. This is optional and only to override the
existing `network_id` entered when creating the class
:type nid_key: str
:param nid_key: Name expected by Piazza for `nid` when making request.
(Usually and by default "nid", but sometimes "id" is expected)
:returns: Python object containing returned data
:type return_response: bool
:param return_response: If set, returns whole :class:`requests.Response`
object rather than just the response body
]
call[name[self]._check_authenticated, parameter[]]
variable[nid] assign[=] <ast.IfExp object at 0x7da1b0e5aef0>
if compare[name[data] is constant[None]] begin[:]
variable[data] assign[=] dictionary[[], []]
variable[headers] assign[=] dictionary[[], []]
if compare[constant[session_id] in name[self].session.cookies] begin[:]
call[name[headers]][constant[CSRF-Token]] assign[=] call[name[self].session.cookies][constant[session_id]]
variable[endpoint] assign[=] call[name[self].base_api_urls][name[api_type]]
if compare[name[api_type] equal[==] constant[logic]] begin[:]
<ast.AugAssign object at 0x7da2044c3af0>
variable[response] assign[=] call[name[self].session.post, parameter[name[endpoint]]]
return[<ast.IfExp object at 0x7da2044c1f90>]
|
keyword[def] identifier[request] ( identifier[self] , identifier[method] , identifier[data] = keyword[None] , identifier[nid] = keyword[None] , identifier[nid_key] = literal[string] ,
identifier[api_type] = literal[string] , identifier[return_response] = keyword[False] ):
literal[string]
identifier[self] . identifier[_check_authenticated] ()
identifier[nid] = identifier[nid] keyword[if] identifier[nid] keyword[else] identifier[self] . identifier[_nid]
keyword[if] identifier[data] keyword[is] keyword[None] :
identifier[data] ={}
identifier[headers] ={}
keyword[if] literal[string] keyword[in] identifier[self] . identifier[session] . identifier[cookies] :
identifier[headers] [ literal[string] ]= identifier[self] . identifier[session] . identifier[cookies] [ literal[string] ]
identifier[endpoint] = identifier[self] . identifier[base_api_urls] [ identifier[api_type] ]
keyword[if] identifier[api_type] == literal[string] :
identifier[endpoint] += literal[string] . identifier[format] (
identifier[method] ,
identifier[_piazza_nonce] ()
)
identifier[response] = identifier[self] . identifier[session] . identifier[post] (
identifier[endpoint] ,
identifier[data] = identifier[json] . identifier[dumps] ({
literal[string] : identifier[method] ,
literal[string] : identifier[dict] ({ identifier[nid_key] : identifier[nid] },** identifier[data] )
}),
identifier[headers] = identifier[headers]
)
keyword[return] identifier[response] keyword[if] identifier[return_response] keyword[else] identifier[response] . identifier[json] ()
|
def request(self, method, data=None, nid=None, nid_key='nid', api_type='logic', return_response=False):
"""Get data from arbitrary Piazza API endpoint `method` in network `nid`
:type method: str
:param method: An internal Piazza API method name like `content.get`
or `network.get_users`
:type data: dict
:param data: Key-value data to pass to Piazza in the request
:type nid: str
:param nid: This is the ID of the network to which the request
should be made. This is optional and only to override the
existing `network_id` entered when creating the class
:type nid_key: str
:param nid_key: Name expected by Piazza for `nid` when making request.
(Usually and by default "nid", but sometimes "id" is expected)
:returns: Python object containing returned data
:type return_response: bool
:param return_response: If set, returns whole :class:`requests.Response`
object rather than just the response body
"""
self._check_authenticated()
nid = nid if nid else self._nid
if data is None:
data = {} # depends on [control=['if'], data=['data']]
headers = {}
if 'session_id' in self.session.cookies:
headers['CSRF-Token'] = self.session.cookies['session_id'] # depends on [control=['if'], data=[]]
# Adding a nonce to the request
endpoint = self.base_api_urls[api_type]
if api_type == 'logic':
endpoint += '?method={}&aid={}'.format(method, _piazza_nonce()) # depends on [control=['if'], data=[]]
response = self.session.post(endpoint, data=json.dumps({'method': method, 'params': dict({nid_key: nid}, **data)}), headers=headers)
return response if return_response else response.json()
|
def dummyctrl(self, r, ctrl):
    """creates a DummyVertex at rank r inserted in the ctrl dict
    of the associated edge and layer.

    Arguments:
        r (int): rank value
        ctrl (dict): the edge's control vertices

    Returns:
        DummyVertex : the created DummyVertex.
    """
    dummy = DummyVertex(r)
    # Give the dummy vertex the default dummy dimensions.
    dummy.view.w = self.dw
    dummy.view.h = self.dh
    self.grx[dummy] = dummy
    # Register the dummy both with the edge's control dict and its layer.
    dummy.ctrl = ctrl
    ctrl[r] = dummy
    self.layers[r].append(dummy)
    return dummy
|
def function[dummyctrl, parameter[self, r, ctrl]]:
constant[creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
]
variable[dv] assign[=] call[name[DummyVertex], parameter[name[r]]]
<ast.Tuple object at 0x7da1b1035ae0> assign[=] tuple[[<ast.Attribute object at 0x7da1b1061e70>, <ast.Attribute object at 0x7da1b1060b50>]]
call[name[self].grx][name[dv]] assign[=] name[dv]
name[dv].ctrl assign[=] name[ctrl]
call[name[ctrl]][name[r]] assign[=] name[dv]
call[call[name[self].layers][name[r]].append, parameter[name[dv]]]
return[name[dv]]
|
keyword[def] identifier[dummyctrl] ( identifier[self] , identifier[r] , identifier[ctrl] ):
literal[string]
identifier[dv] = identifier[DummyVertex] ( identifier[r] )
identifier[dv] . identifier[view] . identifier[w] , identifier[dv] . identifier[view] . identifier[h] = identifier[self] . identifier[dw] , identifier[self] . identifier[dh]
identifier[self] . identifier[grx] [ identifier[dv] ]= identifier[dv]
identifier[dv] . identifier[ctrl] = identifier[ctrl]
identifier[ctrl] [ identifier[r] ]= identifier[dv]
identifier[self] . identifier[layers] [ identifier[r] ]. identifier[append] ( identifier[dv] )
keyword[return] identifier[dv]
|
def dummyctrl(self, r, ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
(dv.view.w, dv.view.h) = (self.dw, self.dh)
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
|
def create_rcontext(self, size, frame):
    '''
    Creates a recording surface for the bot to draw on

    :param size: The width and height of bot
    :param frame: frame number, stored on the instance
    :return: a cairo context backed by a recording surface
    '''
    self.frame = frame
    width, height = size
    # Record into a color+alpha surface covering the full bot area.
    surface = cairo.RecordingSurface(
        cairo.CONTENT_COLOR_ALPHA, (0, 0, width, height))
    return cairo.Context(surface)
|
def function[create_rcontext, parameter[self, size, frame]]:
constant[
Creates a recording surface for the bot to draw on
:param size: The width and height of bot
]
name[self].frame assign[=] name[frame]
<ast.Tuple object at 0x7da1b00f4a30> assign[=] name[size]
variable[meta_surface] assign[=] call[name[cairo].RecordingSurface, parameter[name[cairo].CONTENT_COLOR_ALPHA, tuple[[<ast.Constant object at 0x7da18f723700>, <ast.Constant object at 0x7da18f7206d0>, <ast.Name object at 0x7da18f720850>, <ast.Name object at 0x7da18f723b50>]]]]
variable[ctx] assign[=] call[name[cairo].Context, parameter[name[meta_surface]]]
return[name[ctx]]
|
keyword[def] identifier[create_rcontext] ( identifier[self] , identifier[size] , identifier[frame] ):
literal[string]
identifier[self] . identifier[frame] = identifier[frame]
identifier[width] , identifier[height] = identifier[size]
identifier[meta_surface] = identifier[cairo] . identifier[RecordingSurface] ( identifier[cairo] . identifier[CONTENT_COLOR_ALPHA] ,( literal[int] , literal[int] , identifier[width] , identifier[height] ))
identifier[ctx] = identifier[cairo] . identifier[Context] ( identifier[meta_surface] )
keyword[return] identifier[ctx]
|
def create_rcontext(self, size, frame):
"""
Creates a recording surface for the bot to draw on
:param size: The width and height of bot
"""
self.frame = frame
(width, height) = size
meta_surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, (0, 0, width, height))
ctx = cairo.Context(meta_surface)
return ctx
|
def unlock(self):
    """Unlock the device."""
    # Ask the device to open; only mirror the state locally on success.
    unlocked = self.set_status(CONST.STATUS_LOCKOPEN_INT)
    if unlocked:
        self._json_state['status'] = CONST.STATUS_LOCKOPEN
    return unlocked
|
def function[unlock, parameter[self]]:
constant[Unlock the device.]
variable[success] assign[=] call[name[self].set_status, parameter[name[CONST].STATUS_LOCKOPEN_INT]]
if name[success] begin[:]
call[name[self]._json_state][constant[status]] assign[=] name[CONST].STATUS_LOCKOPEN
return[name[success]]
|
keyword[def] identifier[unlock] ( identifier[self] ):
literal[string]
identifier[success] = identifier[self] . identifier[set_status] ( identifier[CONST] . identifier[STATUS_LOCKOPEN_INT] )
keyword[if] identifier[success] :
identifier[self] . identifier[_json_state] [ literal[string] ]= identifier[CONST] . identifier[STATUS_LOCKOPEN]
keyword[return] identifier[success]
|
def unlock(self):
"""Unlock the device."""
success = self.set_status(CONST.STATUS_LOCKOPEN_INT)
if success:
self._json_state['status'] = CONST.STATUS_LOCKOPEN # depends on [control=['if'], data=[]]
return success
|
def show(close=None):
    """Show all figures as SVG/PNG payloads sent to the IPython clients.

    Parameters
    ----------
    close : bool, optional
        If true, a ``plt.close('all')`` call is automatically issued after
        sending all the figures. If this is set, the figures will entirely
        removed from the internal list of figures.
    """
    if close is None:
        # Fall back to the backend's configured behaviour.
        close = InlineBackend.instance().close_figures
    managers = Gcf.get_all_fig_managers()
    try:
        for manager in managers:
            send_figure(manager.canvas.figure)
    finally:
        # Always reset the pending-draw list, even if sending failed.
        show._to_draw = []
        if close:
            matplotlib.pyplot.close('all')
|
def function[show, parameter[close]]:
constant[Show all figures as SVG/PNG payloads sent to the IPython clients.
Parameters
----------
close : bool, optional
If true, a ``plt.close('all')`` call is automatically issued after
sending all the figures. If this is set, the figures will entirely
removed from the internal list of figures.
]
if compare[name[close] is constant[None]] begin[:]
variable[close] assign[=] call[name[InlineBackend].instance, parameter[]].close_figures
<ast.Try object at 0x7da18f810430>
|
keyword[def] identifier[show] ( identifier[close] = keyword[None] ):
literal[string]
keyword[if] identifier[close] keyword[is] keyword[None] :
identifier[close] = identifier[InlineBackend] . identifier[instance] (). identifier[close_figures]
keyword[try] :
keyword[for] identifier[figure_manager] keyword[in] identifier[Gcf] . identifier[get_all_fig_managers] ():
identifier[send_figure] ( identifier[figure_manager] . identifier[canvas] . identifier[figure] )
keyword[finally] :
identifier[show] . identifier[_to_draw] =[]
keyword[if] identifier[close] :
identifier[matplotlib] . identifier[pyplot] . identifier[close] ( literal[string] )
|
def show(close=None):
"""Show all figures as SVG/PNG payloads sent to the IPython clients.
Parameters
----------
close : bool, optional
If true, a ``plt.close('all')`` call is automatically issued after
sending all the figures. If this is set, the figures will entirely
removed from the internal list of figures.
"""
if close is None:
close = InlineBackend.instance().close_figures # depends on [control=['if'], data=['close']]
try:
for figure_manager in Gcf.get_all_fig_managers():
send_figure(figure_manager.canvas.figure) # depends on [control=['for'], data=['figure_manager']] # depends on [control=['try'], data=[]]
finally:
show._to_draw = []
if close:
matplotlib.pyplot.close('all') # depends on [control=['if'], data=[]]
|
def replace_activities(self):
    """Replace active-state flags with explicit Agent states when possible.

    For every Statement in ``self.statements``, any Agent carrying an
    ``activity`` flag is expanded into the known explicit active (or
    inactive) forms of its base agent, producing one new Statement per
    combination of agent forms.  ``self.statements`` is replaced in place
    with the expanded list.
    """
    logger.debug('Running PySB Preassembler replace activities')
    # TODO: handle activity hierarchies
    new_stmts = []
    def has_agent_activity(stmt):
        """Return True if any agents in the Statement have activity."""
        for agent in stmt.agent_list():
            if isinstance(agent, Agent) and agent.activity is not None:
                return True
        return False
    # First collect all explicit active forms
    self._gather_active_forms()
    # Iterate over all statements
    for j, stmt in enumerate(self.statements):
        logger.debug('%d/%d %s' % (j + 1, len(self.statements), stmt))
        # If the Statement doesn't have any activities, we can just
        # keep it and move on
        if not has_agent_activity(stmt):
            new_stmts.append(stmt)
            continue
        stmt_agents = stmt.agent_list()
        # NOTE(review): num_agents is currently unused.
        num_agents = len(stmt_agents)
        # Make a list with an empty list for each Agent so that later
        # we can build combinations of Agent forms
        agent_forms = [[] for a in stmt_agents]
        for i, agent in enumerate(stmt_agents):
            # This is the case where there is an activity flag on an
            # Agent which we will attempt to replace with an explicit
            # active form
            if agent is not None and isinstance(agent, Agent) and \
                    agent.activity is not None:
                base_agent = self.agent_set.get_create_base_agent(agent)
                # If it is an "active" state
                if agent.activity.is_active:
                    active_forms = base_agent.active_forms
                    # If no explicit active forms are known then we use
                    # the generic one
                    if not active_forms:
                        active_forms = [agent]
                # If it is an "inactive" state
                else:
                    active_forms = base_agent.inactive_forms
                    # If no explicit inactive forms are known then we use
                    # the generic one
                    if not active_forms:
                        active_forms = [agent]
                # We now iterate over the active agent forms and create
                # new agents (each a deep copy so the original Statement's
                # agents are never mutated)
                for af in active_forms:
                    new_agent = fast_deepcopy(agent)
                    self._set_agent_context(af, new_agent)
                    agent_forms[i].append(new_agent)
            # Otherwise we just copy over the agent as is
            else:
                agent_forms[i].append(agent)
        # Now create all possible combinations of the agents and create new
        # statements as needed
        agent_combs = itertools.product(*agent_forms)
        for agent_comb in agent_combs:
            new_stmt = fast_deepcopy(stmt)
            new_stmt.set_agent_list(agent_comb)
            new_stmts.append(new_stmt)
    self.statements = new_stmts
|
def function[replace_activities, parameter[self]]:
constant[Replace ative flags with Agent states when possible.]
call[name[logger].debug, parameter[constant[Running PySB Preassembler replace activities]]]
variable[new_stmts] assign[=] list[[]]
def function[has_agent_activity, parameter[stmt]]:
constant[Return True if any agents in the Statement have activity.]
for taget[name[agent]] in starred[call[name[stmt].agent_list, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da2041d9120> begin[:]
return[constant[True]]
return[constant[False]]
call[name[self]._gather_active_forms, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da2041da560>, <ast.Name object at 0x7da2041da620>]]] in starred[call[name[enumerate], parameter[name[self].statements]]] begin[:]
call[name[logger].debug, parameter[binary_operation[constant[%d/%d %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da2041dbc40>, <ast.Call object at 0x7da2041d96c0>, <ast.Name object at 0x7da2041d9360>]]]]]
if <ast.UnaryOp object at 0x7da2041dba00> begin[:]
call[name[new_stmts].append, parameter[name[stmt]]]
continue
variable[stmt_agents] assign[=] call[name[stmt].agent_list, parameter[]]
variable[num_agents] assign[=] call[name[len], parameter[name[stmt_agents]]]
variable[agent_forms] assign[=] <ast.ListComp object at 0x7da18bc700d0>
for taget[tuple[[<ast.Name object at 0x7da18bc72da0>, <ast.Name object at 0x7da18bc73490>]]] in starred[call[name[enumerate], parameter[name[stmt_agents]]]] begin[:]
if <ast.BoolOp object at 0x7da18bc71ab0> begin[:]
variable[base_agent] assign[=] call[name[self].agent_set.get_create_base_agent, parameter[name[agent]]]
if name[agent].activity.is_active begin[:]
variable[active_forms] assign[=] name[base_agent].active_forms
if <ast.UnaryOp object at 0x7da2041d9810> begin[:]
variable[active_forms] assign[=] list[[<ast.Name object at 0x7da2041da9b0>]]
for taget[name[af]] in starred[name[active_forms]] begin[:]
variable[new_agent] assign[=] call[name[fast_deepcopy], parameter[name[agent]]]
call[name[self]._set_agent_context, parameter[name[af], name[new_agent]]]
call[call[name[agent_forms]][name[i]].append, parameter[name[new_agent]]]
variable[agent_combs] assign[=] call[name[itertools].product, parameter[<ast.Starred object at 0x7da18f722830>]]
for taget[name[agent_comb]] in starred[name[agent_combs]] begin[:]
variable[new_stmt] assign[=] call[name[fast_deepcopy], parameter[name[stmt]]]
call[name[new_stmt].set_agent_list, parameter[name[agent_comb]]]
call[name[new_stmts].append, parameter[name[new_stmt]]]
name[self].statements assign[=] name[new_stmts]
|
keyword[def] identifier[replace_activities] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] )
identifier[new_stmts] =[]
keyword[def] identifier[has_agent_activity] ( identifier[stmt] ):
literal[string]
keyword[for] identifier[agent] keyword[in] identifier[stmt] . identifier[agent_list] ():
keyword[if] identifier[isinstance] ( identifier[agent] , identifier[Agent] ) keyword[and] identifier[agent] . identifier[activity] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[True]
keyword[return] keyword[False]
identifier[self] . identifier[_gather_active_forms] ()
keyword[for] identifier[j] , identifier[stmt] keyword[in] identifier[enumerate] ( identifier[self] . identifier[statements] ):
identifier[logger] . identifier[debug] ( literal[string] %( identifier[j] + literal[int] , identifier[len] ( identifier[self] . identifier[statements] ), identifier[stmt] ))
keyword[if] keyword[not] identifier[has_agent_activity] ( identifier[stmt] ):
identifier[new_stmts] . identifier[append] ( identifier[stmt] )
keyword[continue]
identifier[stmt_agents] = identifier[stmt] . identifier[agent_list] ()
identifier[num_agents] = identifier[len] ( identifier[stmt_agents] )
identifier[agent_forms] =[[] keyword[for] identifier[a] keyword[in] identifier[stmt_agents] ]
keyword[for] identifier[i] , identifier[agent] keyword[in] identifier[enumerate] ( identifier[stmt_agents] ):
keyword[if] identifier[agent] keyword[is] keyword[not] keyword[None] keyword[and] identifier[isinstance] ( identifier[agent] , identifier[Agent] ) keyword[and] identifier[agent] . identifier[activity] keyword[is] keyword[not] keyword[None] :
identifier[base_agent] = identifier[self] . identifier[agent_set] . identifier[get_create_base_agent] ( identifier[agent] )
keyword[if] identifier[agent] . identifier[activity] . identifier[is_active] :
identifier[active_forms] = identifier[base_agent] . identifier[active_forms]
keyword[if] keyword[not] identifier[active_forms] :
identifier[active_forms] =[ identifier[agent] ]
keyword[else] :
identifier[active_forms] = identifier[base_agent] . identifier[inactive_forms]
keyword[if] keyword[not] identifier[active_forms] :
identifier[active_forms] =[ identifier[agent] ]
keyword[for] identifier[af] keyword[in] identifier[active_forms] :
identifier[new_agent] = identifier[fast_deepcopy] ( identifier[agent] )
identifier[self] . identifier[_set_agent_context] ( identifier[af] , identifier[new_agent] )
identifier[agent_forms] [ identifier[i] ]. identifier[append] ( identifier[new_agent] )
keyword[else] :
identifier[agent_forms] [ identifier[i] ]. identifier[append] ( identifier[agent] )
identifier[agent_combs] = identifier[itertools] . identifier[product] (* identifier[agent_forms] )
keyword[for] identifier[agent_comb] keyword[in] identifier[agent_combs] :
identifier[new_stmt] = identifier[fast_deepcopy] ( identifier[stmt] )
identifier[new_stmt] . identifier[set_agent_list] ( identifier[agent_comb] )
identifier[new_stmts] . identifier[append] ( identifier[new_stmt] )
identifier[self] . identifier[statements] = identifier[new_stmts]
|
def replace_activities(self):
"""Replace ative flags with Agent states when possible."""
logger.debug('Running PySB Preassembler replace activities')
# TODO: handle activity hierarchies
new_stmts = []
def has_agent_activity(stmt):
"""Return True if any agents in the Statement have activity."""
for agent in stmt.agent_list():
if isinstance(agent, Agent) and agent.activity is not None:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['agent']]
return False
# First collect all explicit active forms
self._gather_active_forms()
# Iterate over all statements
for (j, stmt) in enumerate(self.statements):
logger.debug('%d/%d %s' % (j + 1, len(self.statements), stmt))
# If the Statement doesn't have any activities, we can just
# keep it and move on
if not has_agent_activity(stmt):
new_stmts.append(stmt)
continue # depends on [control=['if'], data=[]]
stmt_agents = stmt.agent_list()
num_agents = len(stmt_agents)
# Make a list with an empty list for each Agent so that later
# we can build combinations of Agent forms
agent_forms = [[] for a in stmt_agents]
for (i, agent) in enumerate(stmt_agents):
# This is the case where there is an activity flag on an
# Agent which we will attempt to replace with an explicit
# active form
if agent is not None and isinstance(agent, Agent) and (agent.activity is not None):
base_agent = self.agent_set.get_create_base_agent(agent)
# If it is an "active" state
if agent.activity.is_active:
active_forms = base_agent.active_forms
# If no explicit active forms are known then we use
# the generic one
if not active_forms:
active_forms = [agent] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# If it is an "inactive" state
active_forms = base_agent.inactive_forms
# If no explicit inactive forms are known then we use
# the generic one
if not active_forms:
active_forms = [agent] # depends on [control=['if'], data=[]]
# We now iterate over the active agent forms and create
# new agents
for af in active_forms:
new_agent = fast_deepcopy(agent)
self._set_agent_context(af, new_agent)
agent_forms[i].append(new_agent) # depends on [control=['for'], data=['af']] # depends on [control=['if'], data=[]]
else:
# Otherwise we just copy over the agent as is
agent_forms[i].append(agent) # depends on [control=['for'], data=[]]
# Now create all possible combinations of the agents and create new
# statements as needed
agent_combs = itertools.product(*agent_forms)
for agent_comb in agent_combs:
new_stmt = fast_deepcopy(stmt)
new_stmt.set_agent_list(agent_comb)
new_stmts.append(new_stmt) # depends on [control=['for'], data=['agent_comb']] # depends on [control=['for'], data=[]]
self.statements = new_stmts
|
def load_plug_in(self, name):
    """Loads a DBGF plug-in.

    in name of type str
        The plug-in name or DLL. Special name 'all' loads all installed
        plug-ins.

    return plug_in_name of type str
        The name of the loaded plug-in.
    """
    # Validate up front, then delegate to the generic RPC dispatcher.
    if isinstance(name, basestring):
        return self._call("loadPlugIn",
                          in_p=[name])
    raise TypeError("name can only be an instance of type basestring")
|
def function[load_plug_in, parameter[self, name]]:
constant[Loads a DBGF plug-in.
in name of type str
The plug-in name or DLL. Special name 'all' loads all installed plug-ins.
return plug_in_name of type str
The name of the loaded plug-in.
]
if <ast.UnaryOp object at 0x7da20e9b2860> begin[:]
<ast.Raise object at 0x7da20e9b0cd0>
variable[plug_in_name] assign[=] call[name[self]._call, parameter[constant[loadPlugIn]]]
return[name[plug_in_name]]
|
keyword[def] identifier[load_plug_in] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[name] , identifier[basestring] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[plug_in_name] = identifier[self] . identifier[_call] ( literal[string] ,
identifier[in_p] =[ identifier[name] ])
keyword[return] identifier[plug_in_name]
|
def load_plug_in(self, name):
"""Loads a DBGF plug-in.
in name of type str
The plug-in name or DLL. Special name 'all' loads all installed plug-ins.
return plug_in_name of type str
The name of the loaded plug-in.
"""
if not isinstance(name, basestring):
raise TypeError('name can only be an instance of type basestring') # depends on [control=['if'], data=[]]
plug_in_name = self._call('loadPlugIn', in_p=[name])
return plug_in_name
|
def insertChild(self, childItem, position=None):
    """Insert *childItem* under this item and return it.

    The child must not yet belong to a parent or to a model; both are
    assigned here.  IMPORTANT: the model is NOT notified that an item
    was added — use BaseTreeModel.insertItem for that.

    :param childItem: BaseTreeItem to adopt.
    :param position: integer index before which the child is inserted;
        when None (the default) the child is appended at the end.
    :returns: childItem, so that calls may be chained.
    """
    insert_at = self.nChildren() if position is None else position
    assert childItem.parentItem is None, "childItem already has a parent: {}".format(childItem)
    assert childItem._model is None, "childItem already has a model: {}".format(childItem)
    childItem.parentItem = self
    childItem.model = self.model
    self.childItems.insert(insert_at, childItem)
    return childItem
|
def function[insertChild, parameter[self, childItem, position]]:
constant[ Inserts a child item to the current item.
The childItem must not yet have a parent (it will be set by this function).
IMPORTANT: this does not let the model know that items have been added.
Use BaseTreeModel.insertItem instead.
param childItem: a BaseTreeItem that will be added
param position: integer position before which the item will be added.
If position is None (default) the item will be appended at the end.
Returns childItem so that calls may be chained.
]
if compare[name[position] is constant[None]] begin[:]
variable[position] assign[=] call[name[self].nChildren, parameter[]]
assert[compare[name[childItem].parentItem is constant[None]]]
assert[compare[name[childItem]._model is constant[None]]]
name[childItem].parentItem assign[=] name[self]
name[childItem].model assign[=] name[self].model
call[name[self].childItems.insert, parameter[name[position], name[childItem]]]
return[name[childItem]]
|
keyword[def] identifier[insertChild] ( identifier[self] , identifier[childItem] , identifier[position] = keyword[None] ):
literal[string]
keyword[if] identifier[position] keyword[is] keyword[None] :
identifier[position] = identifier[self] . identifier[nChildren] ()
keyword[assert] identifier[childItem] . identifier[parentItem] keyword[is] keyword[None] , literal[string] . identifier[format] ( identifier[childItem] )
keyword[assert] identifier[childItem] . identifier[_model] keyword[is] keyword[None] , literal[string] . identifier[format] ( identifier[childItem] )
identifier[childItem] . identifier[parentItem] = identifier[self]
identifier[childItem] . identifier[model] = identifier[self] . identifier[model]
identifier[self] . identifier[childItems] . identifier[insert] ( identifier[position] , identifier[childItem] )
keyword[return] identifier[childItem]
|
def insertChild(self, childItem, position=None):
""" Inserts a child item to the current item.
The childItem must not yet have a parent (it will be set by this function).
IMPORTANT: this does not let the model know that items have been added.
Use BaseTreeModel.insertItem instead.
param childItem: a BaseTreeItem that will be added
param position: integer position before which the item will be added.
If position is None (default) the item will be appended at the end.
Returns childItem so that calls may be chained.
"""
if position is None:
position = self.nChildren() # depends on [control=['if'], data=['position']]
assert childItem.parentItem is None, 'childItem already has a parent: {}'.format(childItem)
assert childItem._model is None, 'childItem already has a model: {}'.format(childItem)
childItem.parentItem = self
childItem.model = self.model
self.childItems.insert(position, childItem)
return childItem
|
def send_magic_packet(*macs, **kwargs):
    """
    Wake up computers having any of the given mac addresses.

    Wake on lan must be enabled on the host device.

    Args:
        macs (str): One or more macaddresses of machines to wake.

    Keyword Args:
        ip_address (str): the ip address of the host to send the magic packet
            to (default "255.255.255.255")
        port (int): the port of the host to send the magic packet to
            (default 9)

    Raises:
        TypeError: if an unexpected keyword argument is passed.
    """
    ip = kwargs.pop('ip_address', BROADCAST_IP)
    port = kwargs.pop('port', DEFAULT_PORT)
    # Reject mistyped keyword arguments instead of silently ignoring them.
    for k in kwargs:
        raise TypeError('send_magic_packet() got an unexpected keyword '
                        'argument {!r}'.format(k))
    # Build every packet up front so an invalid mac address raises before
    # anything has been sent.
    packets = [create_magic_packet(mac) for mac in macs]
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.connect((ip, port))
        for packet in packets:
            sock.send(packet)
    finally:
        # Fix: the original leaked the socket if connect()/send() raised;
        # always release it.
        sock.close()
|
def function[send_magic_packet, parameter[]]:
constant[
Wake up computers having any of the given mac addresses.
Wake on lan must be enabled on the host device.
Args:
macs (str): One or more macaddresses of machines to wake.
Keyword Args:
ip_address (str): the ip address of the host to send the magic packet
to (default "255.255.255.255")
port (int): the port of the host to send the magic packet to
(default 9)
]
variable[packets] assign[=] list[[]]
variable[ip] assign[=] call[name[kwargs].pop, parameter[constant[ip_address], name[BROADCAST_IP]]]
variable[port] assign[=] call[name[kwargs].pop, parameter[constant[port], name[DEFAULT_PORT]]]
for taget[name[k]] in starred[name[kwargs]] begin[:]
<ast.Raise object at 0x7da1b0e5a950>
for taget[name[mac]] in starred[name[macs]] begin[:]
variable[packet] assign[=] call[name[create_magic_packet], parameter[name[mac]]]
call[name[packets].append, parameter[name[packet]]]
variable[sock] assign[=] call[name[socket].socket, parameter[name[socket].AF_INET, name[socket].SOCK_DGRAM]]
call[name[sock].setsockopt, parameter[name[socket].SOL_SOCKET, name[socket].SO_BROADCAST, constant[1]]]
call[name[sock].connect, parameter[tuple[[<ast.Name object at 0x7da1b0e5ac80>, <ast.Name object at 0x7da1b0e5a740>]]]]
for taget[name[packet]] in starred[name[packets]] begin[:]
call[name[sock].send, parameter[name[packet]]]
call[name[sock].close, parameter[]]
|
keyword[def] identifier[send_magic_packet] (* identifier[macs] ,** identifier[kwargs] ):
literal[string]
identifier[packets] =[]
identifier[ip] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[BROADCAST_IP] )
identifier[port] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[DEFAULT_PORT] )
keyword[for] identifier[k] keyword[in] identifier[kwargs] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] . identifier[format] ( identifier[k] ))
keyword[for] identifier[mac] keyword[in] identifier[macs] :
identifier[packet] = identifier[create_magic_packet] ( identifier[mac] )
identifier[packets] . identifier[append] ( identifier[packet] )
identifier[sock] = identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_INET] , identifier[socket] . identifier[SOCK_DGRAM] )
identifier[sock] . identifier[setsockopt] ( identifier[socket] . identifier[SOL_SOCKET] , identifier[socket] . identifier[SO_BROADCAST] , literal[int] )
identifier[sock] . identifier[connect] (( identifier[ip] , identifier[port] ))
keyword[for] identifier[packet] keyword[in] identifier[packets] :
identifier[sock] . identifier[send] ( identifier[packet] )
identifier[sock] . identifier[close] ()
|
def send_magic_packet(*macs, **kwargs):
"""
Wake up computers having any of the given mac addresses.
Wake on lan must be enabled on the host device.
Args:
macs (str): One or more macaddresses of machines to wake.
Keyword Args:
ip_address (str): the ip address of the host to send the magic packet
to (default "255.255.255.255")
port (int): the port of the host to send the magic packet to
(default 9)
"""
packets = []
ip = kwargs.pop('ip_address', BROADCAST_IP)
port = kwargs.pop('port', DEFAULT_PORT)
for k in kwargs:
raise TypeError('send_magic_packet() got an unexpected keyword argument {!r}'.format(k)) # depends on [control=['for'], data=['k']]
for mac in macs:
packet = create_magic_packet(mac)
packets.append(packet) # depends on [control=['for'], data=['mac']]
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.connect((ip, port))
for packet in packets:
sock.send(packet) # depends on [control=['for'], data=['packet']]
sock.close()
|
def _getConfigData(self, all_dependencies, component, builddir, build_info_header_path):
    ''' Generate the merged yotta config as C preprocessor definitions,
        CMake set() commands and a JSON dump.

        Returns a 3-tuple:
            (config_include_file, set_definitions, config_json_file)

        config_include_file: path to yotta_config.h written into builddir,
            containing #defines for the target, the merged config data,
            the version of every dependency (and of the component itself),
            and the component's defines.json entries.
        set_definitions: string of CMake set() commands mirroring the same
            definitions, for inclusion in generated CMake files.
        config_json_file: path to yotta_config.json written into builddir,
            the merged config data serialised as ordered JSON.
    '''
    # ordered_json, , read/write ordered json, internal
    from yotta.lib import ordered_json
    add_defs_header = ''    # accumulates the body of yotta_config.h
    set_definitions = ''    # accumulates CMake set(...) commands
    # !!! backwards-compatible "TARGET_LIKE" definitions for the top-level
    # of the config. NB: THESE WILL GO AWAY
    definitions = []
    definitions.append(('TARGET', sanitizePreprocessorSymbol(self.target.getName())))
    definitions.append(('TARGET_LIKE_%s' % sanitizePreprocessorSymbol(self.target.getName()),None))
    # make the path to the build-info header available both to CMake and
    # in the preprocessor:
    full_build_info_header_path = replaceBackslashes(os.path.abspath(build_info_header_path))
    logger.debug('build info header include path: "%s"', full_build_info_header_path)
    definitions.append(('YOTTA_BUILD_INFO_HEADER', '"'+full_build_info_header_path+'"'))
    # one TARGET_LIKE_<name> definition per non-wildcard entry in the
    # target's (deprecated) similarity list
    for target in self.target.getSimilarTo_Deprecated():
        if '*' not in target:
            definitions.append(('TARGET_LIKE_%s' % sanitizePreprocessorSymbol(target),None))
    merged_config = self.target.getMergedConfig()
    logger.debug('target configuration data: %s', merged_config)
    definitions += self._definitionsForConfig(merged_config, ['YOTTA', 'CFG'])
    add_defs_header += '// yotta config data (including backwards-compatible definitions)\n'
    # value-less definitions become bare #defines in C and TRUE in CMake
    for k, v in definitions:
        if v is not None:
            add_defs_header += '#define %s %s\n' % (k, v)
            set_definitions += 'set(%s %s)\n' % (k, v)
        else:
            add_defs_header += '#define %s\n' % k
            set_definitions += 'set(%s TRUE)\n' % k
    add_defs_header += '\n// version definitions\n'
    # version macros for every dependency plus the component being built
    for dep in list(all_dependencies.values()) + [component]:
        add_defs_header += "#define YOTTA_%s_VERSION_STRING \"%s\"\n" % (sanitizePreprocessorSymbol(dep.getName()), str(dep.getVersion()))
        add_defs_header += "#define YOTTA_%s_VERSION_MAJOR %d\n" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().major())
        add_defs_header += "#define YOTTA_%s_VERSION_MINOR %d\n" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().minor())
        add_defs_header += "#define YOTTA_%s_VERSION_PATCH %d\n" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().patch())
    # add the component's definitions
    defines = component.getDefines()
    if defines:
        add_defs_header += "\n// direct definitions (defines.json)\n"
        for name, value in defines.items():
            add_defs_header += "#define %s %s\n" % (name, value)
        add_defs_header += '\n'
    # use -include <definitions header> instead of lots of separate
    # defines... this is compiler specific, but currently testing it
    # out for gcc-compatible compilers only:
    config_include_file = os.path.join(builddir, 'yotta_config.h')
    config_json_file = os.path.join(builddir, 'yotta_config.json')
    set_definitions += 'set(YOTTA_CONFIG_MERGED_JSON_FILE \"%s\")\n' % replaceBackslashes(os.path.abspath(config_json_file))
    self._writeFile(
        config_include_file,
        '#ifndef __YOTTA_CONFIG_H__\n'+
        '#define __YOTTA_CONFIG_H__\n'+
        add_defs_header+
        '#endif // ndef __YOTTA_CONFIG_H__\n'
    )
    self._writeFile(
        config_json_file,
        ordered_json.dumps(merged_config)
    )
    return (config_include_file, set_definitions, config_json_file)
|
def function[_getConfigData, parameter[self, all_dependencies, component, builddir, build_info_header_path]]:
constant[ returns (path_to_config_header, cmake_set_definitions) ]
from relative_module[yotta.lib] import module[ordered_json]
variable[add_defs_header] assign[=] constant[]
variable[set_definitions] assign[=] constant[]
variable[definitions] assign[=] list[[]]
call[name[definitions].append, parameter[tuple[[<ast.Constant object at 0x7da1b00dc640>, <ast.Call object at 0x7da1b00dc190>]]]]
call[name[definitions].append, parameter[tuple[[<ast.BinOp object at 0x7da1b00dc3a0>, <ast.Constant object at 0x7da1b00dc4c0>]]]]
variable[full_build_info_header_path] assign[=] call[name[replaceBackslashes], parameter[call[name[os].path.abspath, parameter[name[build_info_header_path]]]]]
call[name[logger].debug, parameter[constant[build info header include path: "%s"], name[full_build_info_header_path]]]
call[name[definitions].append, parameter[tuple[[<ast.Constant object at 0x7da1b00df580>, <ast.BinOp object at 0x7da1b00df790>]]]]
for taget[name[target]] in starred[call[name[self].target.getSimilarTo_Deprecated, parameter[]]] begin[:]
if compare[constant[*] <ast.NotIn object at 0x7da2590d7190> name[target]] begin[:]
call[name[definitions].append, parameter[tuple[[<ast.BinOp object at 0x7da1b00deda0>, <ast.Constant object at 0x7da1b00decb0>]]]]
variable[merged_config] assign[=] call[name[self].target.getMergedConfig, parameter[]]
call[name[logger].debug, parameter[constant[target configuration data: %s], name[merged_config]]]
<ast.AugAssign object at 0x7da1b00dc880>
<ast.AugAssign object at 0x7da1b00dc6a0>
for taget[tuple[[<ast.Name object at 0x7da1b00dc6d0>, <ast.Name object at 0x7da1b00dce50>]]] in starred[name[definitions]] begin[:]
if compare[name[v] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b00dd570>
<ast.AugAssign object at 0x7da1b00dd720>
<ast.AugAssign object at 0x7da1b00de290>
for taget[name[dep]] in starred[binary_operation[call[name[list], parameter[call[name[all_dependencies].values, parameter[]]]] + list[[<ast.Name object at 0x7da1b00ddcc0>]]]] begin[:]
<ast.AugAssign object at 0x7da1b00df6a0>
<ast.AugAssign object at 0x7da1b00ddd20>
<ast.AugAssign object at 0x7da1b00dd810>
<ast.AugAssign object at 0x7da1b00de140>
variable[defines] assign[=] call[name[component].getDefines, parameter[]]
if name[defines] begin[:]
<ast.AugAssign object at 0x7da1b00dd840>
for taget[tuple[[<ast.Name object at 0x7da1b00dcdc0>, <ast.Name object at 0x7da1b00dda50>]]] in starred[call[name[defines].items, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da1b00e0ac0>
<ast.AugAssign object at 0x7da1b00e0430>
variable[config_include_file] assign[=] call[name[os].path.join, parameter[name[builddir], constant[yotta_config.h]]]
variable[config_json_file] assign[=] call[name[os].path.join, parameter[name[builddir], constant[yotta_config.json]]]
<ast.AugAssign object at 0x7da1b00e2680>
call[name[self]._writeFile, parameter[name[config_include_file], binary_operation[binary_operation[binary_operation[constant[#ifndef __YOTTA_CONFIG_H__
] + constant[#define __YOTTA_CONFIG_H__
]] + name[add_defs_header]] + constant[#endif // ndef __YOTTA_CONFIG_H__
]]]]
call[name[self]._writeFile, parameter[name[config_json_file], call[name[ordered_json].dumps, parameter[name[merged_config]]]]]
return[tuple[[<ast.Name object at 0x7da1b00e2a70>, <ast.Name object at 0x7da1b00e3d60>, <ast.Name object at 0x7da1b00e0610>]]]
|
keyword[def] identifier[_getConfigData] ( identifier[self] , identifier[all_dependencies] , identifier[component] , identifier[builddir] , identifier[build_info_header_path] ):
literal[string]
keyword[from] identifier[yotta] . identifier[lib] keyword[import] identifier[ordered_json]
identifier[add_defs_header] = literal[string]
identifier[set_definitions] = literal[string]
identifier[definitions] =[]
identifier[definitions] . identifier[append] (( literal[string] , identifier[sanitizePreprocessorSymbol] ( identifier[self] . identifier[target] . identifier[getName] ())))
identifier[definitions] . identifier[append] (( literal[string] % identifier[sanitizePreprocessorSymbol] ( identifier[self] . identifier[target] . identifier[getName] ()), keyword[None] ))
identifier[full_build_info_header_path] = identifier[replaceBackslashes] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[build_info_header_path] ))
identifier[logger] . identifier[debug] ( literal[string] , identifier[full_build_info_header_path] )
identifier[definitions] . identifier[append] (( literal[string] , literal[string] + identifier[full_build_info_header_path] + literal[string] ))
keyword[for] identifier[target] keyword[in] identifier[self] . identifier[target] . identifier[getSimilarTo_Deprecated] ():
keyword[if] literal[string] keyword[not] keyword[in] identifier[target] :
identifier[definitions] . identifier[append] (( literal[string] % identifier[sanitizePreprocessorSymbol] ( identifier[target] ), keyword[None] ))
identifier[merged_config] = identifier[self] . identifier[target] . identifier[getMergedConfig] ()
identifier[logger] . identifier[debug] ( literal[string] , identifier[merged_config] )
identifier[definitions] += identifier[self] . identifier[_definitionsForConfig] ( identifier[merged_config] ,[ literal[string] , literal[string] ])
identifier[add_defs_header] += literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[definitions] :
keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] :
identifier[add_defs_header] += literal[string] %( identifier[k] , identifier[v] )
identifier[set_definitions] += literal[string] %( identifier[k] , identifier[v] )
keyword[else] :
identifier[add_defs_header] += literal[string] % identifier[k]
identifier[set_definitions] += literal[string] % identifier[k]
identifier[add_defs_header] += literal[string]
keyword[for] identifier[dep] keyword[in] identifier[list] ( identifier[all_dependencies] . identifier[values] ())+[ identifier[component] ]:
identifier[add_defs_header] += literal[string] %( identifier[sanitizePreprocessorSymbol] ( identifier[dep] . identifier[getName] ()), identifier[str] ( identifier[dep] . identifier[getVersion] ()))
identifier[add_defs_header] += literal[string] %( identifier[sanitizePreprocessorSymbol] ( identifier[dep] . identifier[getName] ()), identifier[dep] . identifier[getVersion] (). identifier[major] ())
identifier[add_defs_header] += literal[string] %( identifier[sanitizePreprocessorSymbol] ( identifier[dep] . identifier[getName] ()), identifier[dep] . identifier[getVersion] (). identifier[minor] ())
identifier[add_defs_header] += literal[string] %( identifier[sanitizePreprocessorSymbol] ( identifier[dep] . identifier[getName] ()), identifier[dep] . identifier[getVersion] (). identifier[patch] ())
identifier[defines] = identifier[component] . identifier[getDefines] ()
keyword[if] identifier[defines] :
identifier[add_defs_header] += literal[string]
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[defines] . identifier[items] ():
identifier[add_defs_header] += literal[string] %( identifier[name] , identifier[value] )
identifier[add_defs_header] += literal[string]
identifier[config_include_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[builddir] , literal[string] )
identifier[config_json_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[builddir] , literal[string] )
identifier[set_definitions] += literal[string] % identifier[replaceBackslashes] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[config_json_file] ))
identifier[self] . identifier[_writeFile] (
identifier[config_include_file] ,
literal[string] +
literal[string] +
identifier[add_defs_header] +
literal[string]
)
identifier[self] . identifier[_writeFile] (
identifier[config_json_file] ,
identifier[ordered_json] . identifier[dumps] ( identifier[merged_config] )
)
keyword[return] ( identifier[config_include_file] , identifier[set_definitions] , identifier[config_json_file] )
|
def _getConfigData(self, all_dependencies, component, builddir, build_info_header_path):
""" returns (path_to_config_header, cmake_set_definitions) """
# ordered_json, , read/write ordered json, internal
from yotta.lib import ordered_json
add_defs_header = ''
set_definitions = ''
# !!! backwards-compatible "TARGET_LIKE" definitions for the top-level
# of the config. NB: THESE WILL GO AWAY
definitions = []
definitions.append(('TARGET', sanitizePreprocessorSymbol(self.target.getName())))
definitions.append(('TARGET_LIKE_%s' % sanitizePreprocessorSymbol(self.target.getName()), None))
# make the path to the build-info header available both to CMake and
# in the preprocessor:
full_build_info_header_path = replaceBackslashes(os.path.abspath(build_info_header_path))
logger.debug('build info header include path: "%s"', full_build_info_header_path)
definitions.append(('YOTTA_BUILD_INFO_HEADER', '"' + full_build_info_header_path + '"'))
for target in self.target.getSimilarTo_Deprecated():
if '*' not in target:
definitions.append(('TARGET_LIKE_%s' % sanitizePreprocessorSymbol(target), None)) # depends on [control=['if'], data=['target']] # depends on [control=['for'], data=['target']]
merged_config = self.target.getMergedConfig()
logger.debug('target configuration data: %s', merged_config)
definitions += self._definitionsForConfig(merged_config, ['YOTTA', 'CFG'])
add_defs_header += '// yotta config data (including backwards-compatible definitions)\n'
for (k, v) in definitions:
if v is not None:
add_defs_header += '#define %s %s\n' % (k, v)
set_definitions += 'set(%s %s)\n' % (k, v) # depends on [control=['if'], data=['v']]
else:
add_defs_header += '#define %s\n' % k
set_definitions += 'set(%s TRUE)\n' % k # depends on [control=['for'], data=[]]
add_defs_header += '\n// version definitions\n'
for dep in list(all_dependencies.values()) + [component]:
add_defs_header += '#define YOTTA_%s_VERSION_STRING "%s"\n' % (sanitizePreprocessorSymbol(dep.getName()), str(dep.getVersion()))
add_defs_header += '#define YOTTA_%s_VERSION_MAJOR %d\n' % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().major())
add_defs_header += '#define YOTTA_%s_VERSION_MINOR %d\n' % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().minor())
add_defs_header += '#define YOTTA_%s_VERSION_PATCH %d\n' % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().patch()) # depends on [control=['for'], data=['dep']]
# add the component's definitions
defines = component.getDefines()
if defines:
add_defs_header += '\n// direct definitions (defines.json)\n'
for (name, value) in defines.items():
add_defs_header += '#define %s %s\n' % (name, value) # depends on [control=['for'], data=[]]
add_defs_header += '\n' # depends on [control=['if'], data=[]]
# use -include <definitions header> instead of lots of separate
# defines... this is compiler specific, but currently testing it
# out for gcc-compatible compilers only:
config_include_file = os.path.join(builddir, 'yotta_config.h')
config_json_file = os.path.join(builddir, 'yotta_config.json')
set_definitions += 'set(YOTTA_CONFIG_MERGED_JSON_FILE "%s")\n' % replaceBackslashes(os.path.abspath(config_json_file))
self._writeFile(config_include_file, '#ifndef __YOTTA_CONFIG_H__\n' + '#define __YOTTA_CONFIG_H__\n' + add_defs_header + '#endif // ndef __YOTTA_CONFIG_H__\n')
self._writeFile(config_json_file, ordered_json.dumps(merged_config))
return (config_include_file, set_definitions, config_json_file)
|
def _chunks(self, l, n):
""" Yield n successive chunks from a list l.
"""
l.sort()
newn = int(1.0 * len(l) / n + 0.5)
for i in range(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
|
def function[_chunks, parameter[self, l, n]]:
constant[ Yield n successive chunks from a list l.
]
call[name[l].sort, parameter[]]
variable[newn] assign[=] call[name[int], parameter[binary_operation[binary_operation[binary_operation[constant[1.0] * call[name[len], parameter[name[l]]]] / name[n]] + constant[0.5]]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], binary_operation[name[n] - constant[1]]]]] begin[:]
<ast.Yield object at 0x7da18f09dfc0>
<ast.Yield object at 0x7da18f09f490>
|
keyword[def] identifier[_chunks] ( identifier[self] , identifier[l] , identifier[n] ):
literal[string]
identifier[l] . identifier[sort] ()
identifier[newn] = identifier[int] ( literal[int] * identifier[len] ( identifier[l] )/ identifier[n] + literal[int] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[n] - literal[int] ):
keyword[yield] identifier[l] [ identifier[i] * identifier[newn] : identifier[i] * identifier[newn] + identifier[newn] ]
keyword[yield] identifier[l] [ identifier[n] * identifier[newn] - identifier[newn] :]
|
def _chunks(self, l, n):
""" Yield n successive chunks from a list l.
"""
l.sort()
newn = int(1.0 * len(l) / n + 0.5)
for i in range(0, n - 1):
yield l[i * newn:i * newn + newn] # depends on [control=['for'], data=['i']]
yield l[n * newn - newn:]
|
def type_assert_dict(
    d,
    kcls=None,
    vcls=None,
    allow_none=False,
    cast_from=None,
    cast_to=None,
    dynamic=None,
    objcls=None,
    ctor=None,
):
    """ Type-assert every key/value pair of the dict @d.

    Keys are checked against @kcls and values against @vcls; dict
    values are unmarshalled into @vcls instances when @vcls is a
    class type.

    Args:
        d: the dict whose items are asserted
        kcls: expected key class (NOTE: JSON only allows str keys);
            None skips the key check
        vcls: expected value class; None skips the value check
        allow_none: permit None values (keys are never None-checked)
        cast_from: type or tuple of types whose instances may be
            cast to @cast_to
        cast_to: target type for @cast_from instances, or None to
            cast to @vcls.  Use a lambda or factory function when
            plain type(x) is not enough.
        dynamic: fallback value used when @d is None
        objcls: when given, assert that @d itself is of this type,
            ie: dict, etc...  (isinstance treats
            collections.OrderedDict as a dict)
        ctor: optional static method used as the constructor
            instead of __init__ when unmarshalling values
    Returns:
        a rebuilt dict of the same type as @d; rebuilding may be a
        performance concern when @d has many items
    Raises:
        TypeError: if any key is not an instance of @kcls or any
            value is not an instance of @vcls
    """
    _check_dstruct(d, objcls)
    if d is None and dynamic is not None:
        d = dynamic

    def _assert_key(k):
        # Key check is skipped entirely when no key class was given.
        return _check(k, kcls) if kcls else k

    def _assert_value(v):
        # Value check (and optional unmarshalling/casting).
        if not vcls:
            return v
        return _check(
            v,
            vcls,
            allow_none,
            cast_from,
            cast_to,
            ctor=ctor,
        )

    # Rebuild with the same mapping type as the input (dict,
    # OrderedDict, ...).
    return type(d)(
        (_assert_key(k), _assert_value(v)) for k, v in d.items()
    )
|
def function[type_assert_dict, parameter[d, kcls, vcls, allow_none, cast_from, cast_to, dynamic, objcls, ctor]]:
constant[ Checks that every key/value in @d is an instance of @kcls: @vcls
Will also unmarshal JSON objects to Python objects if
the value is an instance of dict and @vcls is a class type
Args:
d: The dict to type assert
kcls: The class to type assert for keys.
NOTE: JSON only allows str keys
vcls: The class to type assert for values
allow_none: Allow a None value for the values.
This would not make sense for the keys.
cast_from: type-or-tuple-of-types, If @obj is an instance
of this type(s), cast it to @cast_to
cast_to: type, The type to cast @obj to if it's an instance
of @cast_from, or None to cast to @cls.
If you need more than type(x), use a lambda or
factory function.
dynamic: @cls, A dynamic default value if @d is None,
and @dynamic is not None.
objcls: None-or-type, a type to assert @d is,
ie: dict, etc...
Note that isinstance considers
collections.OrderedDict to be of type dict
ctor: None-or-static-method: Use this method as the
constructor instead of __init__
Returns:
@d, note that @d will be recreated, which
may be a performance concern if @d has many items
Raises:
TypeError: if a key is not an instance of @kcls or
a value is not an instance of @vcls
]
call[name[_check_dstruct], parameter[name[d], name[objcls]]]
if <ast.BoolOp object at 0x7da20c796470> begin[:]
variable[d] assign[=] name[dynamic]
variable[t] assign[=] call[name[type], parameter[name[d]]]
return[call[name[t], parameter[<ast.GeneratorExp object at 0x7da20c795210>]]]
|
keyword[def] identifier[type_assert_dict] (
identifier[d] ,
identifier[kcls] = keyword[None] ,
identifier[vcls] = keyword[None] ,
identifier[allow_none] = keyword[False] ,
identifier[cast_from] = keyword[None] ,
identifier[cast_to] = keyword[None] ,
identifier[dynamic] = keyword[None] ,
identifier[objcls] = keyword[None] ,
identifier[ctor] = keyword[None] ,
):
literal[string]
identifier[_check_dstruct] ( identifier[d] , identifier[objcls] )
keyword[if] (
identifier[d] keyword[is] keyword[None]
keyword[and]
identifier[dynamic] keyword[is] keyword[not] keyword[None]
):
identifier[d] = identifier[dynamic]
identifier[t] = identifier[type] ( identifier[d] )
keyword[return] identifier[t] (
(
identifier[_check] ( identifier[k] , identifier[kcls] ) keyword[if] identifier[kcls] keyword[else] identifier[k] ,
identifier[_check] (
identifier[v] ,
identifier[vcls] ,
identifier[allow_none] ,
identifier[cast_from] ,
identifier[cast_to] ,
identifier[ctor] = identifier[ctor] ,
) keyword[if] identifier[vcls] keyword[else] identifier[v] ,
)
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d] . identifier[items] ()
)
|
def type_assert_dict(d, kcls=None, vcls=None, allow_none=False, cast_from=None, cast_to=None, dynamic=None, objcls=None, ctor=None):
""" Checks that every key/value in @d is an instance of @kcls: @vcls
Will also unmarshal JSON objects to Python objects if
the value is an instance of dict and @vcls is a class type
Args:
d: The dict to type assert
kcls: The class to type assert for keys.
NOTE: JSON only allows str keys
vcls: The class to type assert for values
allow_none: Allow a None value for the values.
This would not make sense for the keys.
cast_from: type-or-tuple-of-types, If @obj is an instance
of this type(s), cast it to @cast_to
cast_to: type, The type to cast @obj to if it's an instance
of @cast_from, or None to cast to @cls.
If you need more than type(x), use a lambda or
factory function.
dynamic: @cls, A dynamic default value if @d is None,
and @dynamic is not None.
objcls: None-or-type, a type to assert @d is,
ie: dict, etc...
Note that isinstance considers
collections.OrderedDict to be of type dict
ctor: None-or-static-method: Use this method as the
constructor instead of __init__
Returns:
@d, note that @d will be recreated, which
may be a performance concern if @d has many items
Raises:
TypeError: if a key is not an instance of @kcls or
a value is not an instance of @vcls
"""
_check_dstruct(d, objcls)
if d is None and dynamic is not None:
d = dynamic # depends on [control=['if'], data=[]]
t = type(d)
return t(((_check(k, kcls) if kcls else k, _check(v, vcls, allow_none, cast_from, cast_to, ctor=ctor) if vcls else v) for (k, v) in d.items()))
|
def DebugLog(self):
    "Return the contents of the debug log and clear it"
    if not self.Log:
        return u''
    msg = self.Log.getvalue()
    # Dispose of the underlying buffer so a fresh log can be started.
    self.Log.close()
    self.Log = None
    return msg
|
def function[DebugLog, parameter[self]]:
constant[Devolver y limpiar la bitácora de depuración]
if name[self].Log begin[:]
variable[msg] assign[=] call[name[self].Log.getvalue, parameter[]]
call[name[self].Log.close, parameter[]]
name[self].Log assign[=] constant[None]
return[name[msg]]
|
keyword[def] identifier[DebugLog] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[Log] :
identifier[msg] = identifier[self] . identifier[Log] . identifier[getvalue] ()
identifier[self] . identifier[Log] . identifier[close] ()
identifier[self] . identifier[Log] = keyword[None]
keyword[else] :
identifier[msg] = literal[string]
keyword[return] identifier[msg]
|
def DebugLog(self):
"""Devolver y limpiar la bitácora de depuración"""
if self.Log:
msg = self.Log.getvalue()
# limpiar log
self.Log.close()
self.Log = None # depends on [control=['if'], data=[]]
else:
msg = u''
return msg
|
def address(self, is_compressed=None):
    """
    Return the public address representation of this key, if available.
    """
    # Hash the public key first, then format it for this network.
    key_hash = self.hash160(is_compressed=is_compressed)
    return self._network.address.for_p2pkh(key_hash)
|
def function[address, parameter[self, is_compressed]]:
constant[
Return the public address representation of this key, if available.
]
return[call[name[self]._network.address.for_p2pkh, parameter[call[name[self].hash160, parameter[]]]]]
|
keyword[def] identifier[address] ( identifier[self] , identifier[is_compressed] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_network] . identifier[address] . identifier[for_p2pkh] ( identifier[self] . identifier[hash160] ( identifier[is_compressed] = identifier[is_compressed] ))
|
def address(self, is_compressed=None):
"""
Return the public address representation of this key, if available.
"""
return self._network.address.for_p2pkh(self.hash160(is_compressed=is_compressed))
|
def set(self, hue):
    """Set cursor position on the color corresponding to the hue value."""
    # Map hue (0..360) onto the widget's pixel width.
    width = self.winfo_width()
    x = hue / 360. * width
    self.coords('cursor', x, 0, x, self.winfo_height())
    self._variable.set(hue)
|
def function[set, parameter[self, hue]]:
constant[Set cursor position on the color corresponding to the hue value.]
variable[x] assign[=] binary_operation[binary_operation[name[hue] / constant[360.0]] * call[name[self].winfo_width, parameter[]]]
call[name[self].coords, parameter[constant[cursor], name[x], constant[0], name[x], call[name[self].winfo_height, parameter[]]]]
call[name[self]._variable.set, parameter[name[hue]]]
|
keyword[def] identifier[set] ( identifier[self] , identifier[hue] ):
literal[string]
identifier[x] = identifier[hue] / literal[int] * identifier[self] . identifier[winfo_width] ()
identifier[self] . identifier[coords] ( literal[string] , identifier[x] , literal[int] , identifier[x] , identifier[self] . identifier[winfo_height] ())
identifier[self] . identifier[_variable] . identifier[set] ( identifier[hue] )
|
def set(self, hue):
"""Set cursor position on the color corresponding to the hue value."""
x = hue / 360.0 * self.winfo_width()
self.coords('cursor', x, 0, x, self.winfo_height())
self._variable.set(hue)
|
def filters(self):
    """Return the list of filter definitions found in the query string.

    Two syntaxes are supported:

    * ``filter=<json list>`` -- parsed as JSON
    * ``filter[<field>]=<value>`` -- simple key/value filters

    :return list: filter information
    :raises InvalidFilters: if the JSON payload cannot be parsed or is
        not an iterable of filters
    """
    results = []
    raw_filters = self.qs.get('filter')
    if raw_filters is not None:
        try:
            # extend() stays inside the try so that a non-iterable JSON
            # payload (TypeError) is reported as a parse error too.
            results.extend(json.loads(raw_filters))
        except (ValueError, TypeError):
            raise InvalidFilters("Parse error")
    # Look up the simple-filter parameters once instead of twice.
    simple = self._get_key_values('filter[')
    if simple:
        results.extend(self._simple_filters(simple))
    return results
|
def function[filters, parameter[self]]:
constant[Return filters from query string.
:return list: filter information
]
variable[results] assign[=] list[[]]
variable[filters] assign[=] call[name[self].qs.get, parameter[constant[filter]]]
if compare[name[filters] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b1632110>
if call[name[self]._get_key_values, parameter[constant[filter[]]] begin[:]
call[name[results].extend, parameter[call[name[self]._simple_filters, parameter[call[name[self]._get_key_values, parameter[constant[filter[]]]]]]]
return[name[results]]
|
keyword[def] identifier[filters] ( identifier[self] ):
literal[string]
identifier[results] =[]
identifier[filters] = identifier[self] . identifier[qs] . identifier[get] ( literal[string] )
keyword[if] identifier[filters] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[results] . identifier[extend] ( identifier[json] . identifier[loads] ( identifier[filters] ))
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
keyword[raise] identifier[InvalidFilters] ( literal[string] )
keyword[if] identifier[self] . identifier[_get_key_values] ( literal[string] ):
identifier[results] . identifier[extend] ( identifier[self] . identifier[_simple_filters] ( identifier[self] . identifier[_get_key_values] ( literal[string] )))
keyword[return] identifier[results]
|
def filters(self):
"""Return filters from query string.
:return list: filter information
"""
results = []
filters = self.qs.get('filter')
if filters is not None:
try:
results.extend(json.loads(filters)) # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
raise InvalidFilters('Parse error') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['filters']]
if self._get_key_values('filter['):
results.extend(self._simple_filters(self._get_key_values('filter['))) # depends on [control=['if'], data=[]]
return results
|
def is_unique(keys, *, scope=None, comparison_operators=None, present_optional=False,
        message=None):
    """Returns a Validation that makes sure the given value is unique for a table and optionally a
    scope.

    :param keys: a column name (str) or a sequence of column names that
        must be unique together
    :param scope: optional sequence of extra column names restricting the
        uniqueness check (e.g. unique per account)
    :param comparison_operators: operator string(s) used when comparing
        columns; defaults to " = ".  When both keys and scope are given,
        the scope operators follow the key operators in the sequence.
    :param present_optional: if True, a single key whose value is None is
        considered valid (the query is skipped)
    :param message: error message used when validation fails; defaults to
        "is already taken"
    """
    def check(pname, validator):
        """Checks that a value is unique in its column, with an optional scope."""
        # pylint: disable=too-many-branches
        model = validator.model
        data_access = validator.data_access
        pkname = model.primary_key_name
        pkey = model.primary_key
        if isinstance(keys, str):
            # Single-column uniqueness: one constraint on the key column.
            key = getattr(model, keys)
            if present_optional and key is None:
                return True
            if comparison_operators:
                if isinstance(comparison_operators, str):
                    op = comparison_operators
                else:
                    op = comparison_operators[0]
            else:
                op = " = "
            constraints = [(keys, key, op)]
        else:
            # Composite uniqueness: one constraint per key column.
            if comparison_operators:
                ops = comparison_operators
            else:
                ops = [" = "] * len(keys)
            constraints = list(zip(keys, [getattr(model, key) for key in keys], ops))
        if scope:
            if comparison_operators:
                # Scope operators are the tail of the sequence, after the
                # operators already consumed by the key constraints.
                ops = comparison_operators[len(constraints):]
            else:
                ops = [" = "] * len(scope)
            constraints.extend(zip(scope, [getattr(model, col) for col in scope], ops))
        dupe = data_access.find(model.table_name, constraints, columns=pkname)
        if dupe is None:
            # No matching row: the value is unique.
            return True
        # A row was found; it only counts as a duplicate if it is not the
        # record currently being validated (compare primary keys).
        if isinstance(pkname, str):
            return dupe[0] == pkey
        else:
            return tuple(dupe) == tuple(pkey)
    return Validation(check, keys, message or "is already taken", is_simple=False)
|
def function[is_unique, parameter[keys]]:
constant[Returns a Validation that makes sure the given value is unique for a table and optionally a
scope.
]
def function[check, parameter[pname, validator]]:
constant[Checks that a value is unique in its column, with an optional scope.]
variable[model] assign[=] name[validator].model
variable[data_access] assign[=] name[validator].data_access
variable[pkname] assign[=] name[model].primary_key_name
variable[pkey] assign[=] name[model].primary_key
if call[name[isinstance], parameter[name[keys], name[str]]] begin[:]
variable[key] assign[=] call[name[getattr], parameter[name[model], name[keys]]]
if <ast.BoolOp object at 0x7da1b13512d0> begin[:]
return[constant[True]]
if name[comparison_operators] begin[:]
if call[name[isinstance], parameter[name[comparison_operators], name[str]]] begin[:]
variable[op] assign[=] name[comparison_operators]
variable[constraints] assign[=] list[[<ast.Tuple object at 0x7da1b13516f0>]]
if name[scope] begin[:]
if name[comparison_operators] begin[:]
variable[ops] assign[=] call[name[comparison_operators]][<ast.Slice object at 0x7da1b15d4910>]
call[name[constraints].extend, parameter[call[name[zip], parameter[name[scope], <ast.ListComp object at 0x7da1b15d5330>, name[ops]]]]]
variable[dupe] assign[=] call[name[data_access].find, parameter[name[model].table_name, name[constraints]]]
if compare[name[dupe] is constant[None]] begin[:]
return[constant[True]]
if call[name[isinstance], parameter[name[pkname], name[str]]] begin[:]
return[compare[call[name[dupe]][constant[0]] equal[==] name[pkey]]]
return[call[name[Validation], parameter[name[check], name[keys], <ast.BoolOp object at 0x7da1b1352e90>]]]
|
keyword[def] identifier[is_unique] ( identifier[keys] ,*, identifier[scope] = keyword[None] , identifier[comparison_operators] = keyword[None] , identifier[present_optional] = keyword[False] ,
identifier[message] = keyword[None] ):
literal[string]
keyword[def] identifier[check] ( identifier[pname] , identifier[validator] ):
literal[string]
identifier[model] = identifier[validator] . identifier[model]
identifier[data_access] = identifier[validator] . identifier[data_access]
identifier[pkname] = identifier[model] . identifier[primary_key_name]
identifier[pkey] = identifier[model] . identifier[primary_key]
keyword[if] identifier[isinstance] ( identifier[keys] , identifier[str] ):
identifier[key] = identifier[getattr] ( identifier[model] , identifier[keys] )
keyword[if] identifier[present_optional] keyword[and] identifier[key] keyword[is] keyword[None] :
keyword[return] keyword[True]
keyword[if] identifier[comparison_operators] :
keyword[if] identifier[isinstance] ( identifier[comparison_operators] , identifier[str] ):
identifier[op] = identifier[comparison_operators]
keyword[else] :
identifier[op] = identifier[comparison_operators] [ literal[int] ]
keyword[else] :
identifier[op] = literal[string]
identifier[constraints] =[( identifier[keys] , identifier[key] , identifier[op] )]
keyword[else] :
keyword[if] identifier[comparison_operators] :
identifier[ops] = identifier[comparison_operators]
keyword[else] :
identifier[ops] =[ literal[string] ]* identifier[len] ( identifier[keys] )
identifier[constraints] = identifier[list] ( identifier[zip] ( identifier[keys] ,[ identifier[getattr] ( identifier[model] , identifier[key] ) keyword[for] identifier[key] keyword[in] identifier[keys] ], identifier[ops] ))
keyword[if] identifier[scope] :
keyword[if] identifier[comparison_operators] :
identifier[ops] = identifier[comparison_operators] [ identifier[len] ( identifier[constraints] ):]
keyword[else] :
identifier[ops] =[ literal[string] ]* identifier[len] ( identifier[scope] )
identifier[constraints] . identifier[extend] ( identifier[zip] ( identifier[scope] ,[ identifier[getattr] ( identifier[model] , identifier[col] ) keyword[for] identifier[col] keyword[in] identifier[scope] ], identifier[ops] ))
identifier[dupe] = identifier[data_access] . identifier[find] ( identifier[model] . identifier[table_name] , identifier[constraints] , identifier[columns] = identifier[pkname] )
keyword[if] identifier[dupe] keyword[is] keyword[None] :
keyword[return] keyword[True]
keyword[if] identifier[isinstance] ( identifier[pkname] , identifier[str] ):
keyword[return] identifier[dupe] [ literal[int] ]== identifier[pkey]
keyword[else] :
keyword[return] identifier[tuple] ( identifier[dupe] )== identifier[tuple] ( identifier[pkey] )
keyword[return] identifier[Validation] ( identifier[check] , identifier[keys] , identifier[message] keyword[or] literal[string] , identifier[is_simple] = keyword[False] )
|
def is_unique(keys, *, scope=None, comparison_operators=None, present_optional=False, message=None):
"""Returns a Validation that makes sure the given value is unique for a table and optionally a
scope.
"""
def check(pname, validator):
"""Checks that a value is unique in its column, with an optional scope."""
# pylint: disable=too-many-branches
model = validator.model
data_access = validator.data_access
pkname = model.primary_key_name
pkey = model.primary_key
if isinstance(keys, str):
key = getattr(model, keys)
if present_optional and key is None:
return True # depends on [control=['if'], data=[]]
if comparison_operators:
if isinstance(comparison_operators, str):
op = comparison_operators # depends on [control=['if'], data=[]]
else:
op = comparison_operators[0] # depends on [control=['if'], data=[]]
else:
op = ' = '
constraints = [(keys, key, op)] # depends on [control=['if'], data=[]]
else:
if comparison_operators:
ops = comparison_operators # depends on [control=['if'], data=[]]
else:
ops = [' = '] * len(keys)
constraints = list(zip(keys, [getattr(model, key) for key in keys], ops))
if scope:
if comparison_operators:
ops = comparison_operators[len(constraints):] # depends on [control=['if'], data=[]]
else:
ops = [' = '] * len(scope)
constraints.extend(zip(scope, [getattr(model, col) for col in scope], ops)) # depends on [control=['if'], data=[]]
dupe = data_access.find(model.table_name, constraints, columns=pkname)
if dupe is None:
return True # depends on [control=['if'], data=[]]
if isinstance(pkname, str):
return dupe[0] == pkey # depends on [control=['if'], data=[]]
else:
return tuple(dupe) == tuple(pkey)
return Validation(check, keys, message or 'is already taken', is_simple=False)
|
def commits(self, branch, since=0, to=None):
    """For given branch return a dict of commits keyed by sha.

    Each commit value contains basic information about the commit.
    Raises GithubException if rate limit is exceeded
    :param branch: git branch
    :type branch: str
    :param since: minimal timestamp for commit's commit date
    :type since: int
    :param to: maximal timestamp for commit's commit date; defaults to
        "now + one day", computed at call time
    :type to: int
    """
    # The old default ``to=int(time.time()) + 86400`` was evaluated once
    # at import time, so a long-running process silently used a stale
    # cutoff.  Compute the default per call instead.
    if to is None:
        to = int(time.time()) + 86400
    if self.github.get_rate_limit().rate.limit == 0:
        raise GithubException
    commits = {}
    since_dt = datetime.datetime.fromtimestamp(since)
    to_dt = datetime.datetime.fromtimestamp(to)
    for commit in self.repo.get_commits(sha=branch, since=since_dt, until=to_dt):
        commits[commit.sha] = self._commitData(commit)
    return commits
|
def function[commits, parameter[self, branch, since, to]]:
constant[For given branch return a list of commits.
Each commit contains basic information about itself.
Raises GithubException if rate limit is exceeded
:param branch: git branch
:type branch: str
:param since: minimal timestamp for commit's commit date
:type since: int
:param to: maximal timestamp for commit's commit date
:type to: int
]
if compare[call[name[self].github.get_rate_limit, parameter[]].rate.limit equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b24e3f10>
variable[commits] assign[=] dictionary[[], []]
variable[since_dt] assign[=] call[name[datetime].datetime.fromtimestamp, parameter[name[since]]]
variable[to_dt] assign[=] call[name[datetime].datetime.fromtimestamp, parameter[name[to]]]
for taget[name[commit]] in starred[call[name[self].repo.get_commits, parameter[]]] begin[:]
call[name[commits]][name[commit].sha] assign[=] call[name[self]._commitData, parameter[name[commit]]]
return[name[commits]]
|
keyword[def] identifier[commits] ( identifier[self] , identifier[branch] , identifier[since] = literal[int] , identifier[to] = identifier[int] ( identifier[time] . identifier[time] ())+ literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[github] . identifier[get_rate_limit] (). identifier[rate] . identifier[limit] == literal[int] :
keyword[raise] identifier[GithubException]
identifier[commits] ={}
identifier[since_dt] = identifier[datetime] . identifier[datetime] . identifier[fromtimestamp] ( identifier[since] )
identifier[to_dt] = identifier[datetime] . identifier[datetime] . identifier[fromtimestamp] ( identifier[to] )
keyword[for] identifier[commit] keyword[in] identifier[self] . identifier[repo] . identifier[get_commits] ( identifier[sha] = identifier[branch] , identifier[since] = identifier[since_dt] , identifier[until] = identifier[to_dt] ):
identifier[commits] [ identifier[commit] . identifier[sha] ]= identifier[self] . identifier[_commitData] ( identifier[commit] )
keyword[return] identifier[commits]
|
def commits(self, branch, since=0, to=int(time.time()) + 86400):
"""For given branch return a list of commits.
Each commit contains basic information about itself.
Raises GithubException if rate limit is exceeded
:param branch: git branch
:type branch: str
:param since: minimal timestamp for commit's commit date
:type since: int
:param to: maximal timestamp for commit's commit date
:type to: int
"""
if self.github.get_rate_limit().rate.limit == 0:
raise GithubException # depends on [control=['if'], data=[]]
commits = {}
since_dt = datetime.datetime.fromtimestamp(since)
to_dt = datetime.datetime.fromtimestamp(to)
for commit in self.repo.get_commits(sha=branch, since=since_dt, until=to_dt):
commits[commit.sha] = self._commitData(commit) # depends on [control=['for'], data=['commit']]
return commits
|
def coverage(self):
    """
    Get the fraction of this title sequence that is matched by its reads.
    @return: The C{float} fraction of the title sequence matched by its
        reads.
    """
    # Accumulate every matched interval, then ask the tracker what
    # fraction of the subject it covers.
    matched = ReadIntervals(self.subjectLength)
    record = matched.add
    for hsp in self.hsps():
        record(hsp.subjectStart, hsp.subjectEnd)
    return matched.coverage()
|
def function[coverage, parameter[self]]:
constant[
Get the fraction of this title sequence that is matched by its reads.
@return: The C{float} fraction of the title sequence matched by its
reads.
]
variable[intervals] assign[=] call[name[ReadIntervals], parameter[name[self].subjectLength]]
for taget[name[hsp]] in starred[call[name[self].hsps, parameter[]]] begin[:]
call[name[intervals].add, parameter[name[hsp].subjectStart, name[hsp].subjectEnd]]
return[call[name[intervals].coverage, parameter[]]]
|
keyword[def] identifier[coverage] ( identifier[self] ):
literal[string]
identifier[intervals] = identifier[ReadIntervals] ( identifier[self] . identifier[subjectLength] )
keyword[for] identifier[hsp] keyword[in] identifier[self] . identifier[hsps] ():
identifier[intervals] . identifier[add] ( identifier[hsp] . identifier[subjectStart] , identifier[hsp] . identifier[subjectEnd] )
keyword[return] identifier[intervals] . identifier[coverage] ()
|
def coverage(self):
"""
Get the fraction of this title sequence that is matched by its reads.
@return: The C{float} fraction of the title sequence matched by its
reads.
"""
intervals = ReadIntervals(self.subjectLength)
for hsp in self.hsps():
intervals.add(hsp.subjectStart, hsp.subjectEnd) # depends on [control=['for'], data=['hsp']]
return intervals.coverage()
|
def factory(data):
    """Tahoma Event factory.

    Dispatch on ``data['name']`` and build the matching event object.

    :param data: decoded event payload; must contain a ``name`` key
    :raises ValueError: if the event name is not recognised
    """
    # The previous implementation compared the name with ``is``, which
    # tests object identity rather than equality.  That only appeared to
    # work because CPython interns some literals; names coming from a
    # parsed payload are generally distinct objects, so every event fell
    # through to the ValueError branch.  Use ``==`` instead.
    name = data['name']
    if name == "DeviceStateChangedEvent":
        return DeviceStateChangedEvent(data)
    elif name == "ExecutionStateChangedEvent":
        return ExecutionStateChangedEvent(data)
    elif name == "CommandExecutionStateChangedEvent":
        return CommandExecutionStateChangedEvent(data)
    raise ValueError("Unknown event '" + name + "' occurred.")
|
def function[factory, parameter[data]]:
constant[Tahoma Event factory.]
if compare[call[name[data]][constant[name]] is constant[DeviceStateChangedEvent]] begin[:]
return[call[name[DeviceStateChangedEvent], parameter[name[data]]]]
|
keyword[def] identifier[factory] ( identifier[data] ):
literal[string]
keyword[if] identifier[data] [ literal[string] ] keyword[is] literal[string] :
keyword[return] identifier[DeviceStateChangedEvent] ( identifier[data] )
keyword[elif] identifier[data] [ literal[string] ] keyword[is] literal[string] :
keyword[return] identifier[ExecutionStateChangedEvent] ( identifier[data] )
keyword[elif] identifier[data] [ literal[string] ] keyword[is] literal[string] :
keyword[return] identifier[CommandExecutionStateChangedEvent] ( identifier[data] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[data] [ literal[string] ]+ literal[string] )
|
def factory(data):
"""Tahoma Event factory."""
if data['name'] is 'DeviceStateChangedEvent':
return DeviceStateChangedEvent(data) # depends on [control=['if'], data=[]]
elif data['name'] is 'ExecutionStateChangedEvent':
return ExecutionStateChangedEvent(data) # depends on [control=['if'], data=[]]
elif data['name'] is 'CommandExecutionStateChangedEvent':
return CommandExecutionStateChangedEvent(data) # depends on [control=['if'], data=[]]
else:
raise ValueError("Unknown event '" + data['name'] + "' occurred.")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.