repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbStreamTransport.enqueue_message | def enqueue_message(self, message, timeout):
"""Add the given message to this transport's queue.
This method also handles ACKing any WRTE messages.
Args:
message: The AdbMessage to enqueue.
timeout: The timeout to use for the operation. Specifically, WRTE
messages cause an OKAY to be sent; timeout is used for that send.
"""
# Ack WRTE messages immediately, handle our OPEN ack if it gets enqueued.
if message.command == 'WRTE':
self._send_command('OKAY', timeout=timeout)
elif message.command == 'OKAY':
self._set_or_check_remote_id(message.arg0)
self.message_queue.put(message) | python | def enqueue_message(self, message, timeout):
"""Add the given message to this transport's queue.
This method also handles ACKing any WRTE messages.
Args:
message: The AdbMessage to enqueue.
timeout: The timeout to use for the operation. Specifically, WRTE
messages cause an OKAY to be sent; timeout is used for that send.
"""
# Ack WRTE messages immediately, handle our OPEN ack if it gets enqueued.
if message.command == 'WRTE':
self._send_command('OKAY', timeout=timeout)
elif message.command == 'OKAY':
self._set_or_check_remote_id(message.arg0)
self.message_queue.put(message) | [
"def",
"enqueue_message",
"(",
"self",
",",
"message",
",",
"timeout",
")",
":",
"# Ack WRTE messages immediately, handle our OPEN ack if it gets enqueued.",
"if",
"message",
".",
"command",
"==",
"'WRTE'",
":",
"self",
".",
"_send_command",
"(",
"'OKAY'",
",",
"timeo... | Add the given message to this transport's queue.
This method also handles ACKing any WRTE messages.
Args:
message: The AdbMessage to enqueue.
timeout: The timeout to use for the operation. Specifically, WRTE
messages cause an OKAY to be sent; timeout is used for that send. | [
"Add",
"the",
"given",
"message",
"to",
"this",
"transport",
"s",
"queue",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L440-L455 | train | 221,800 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbStreamTransport.write | def write(self, data, timeout):
"""Write data to this stream, using the given timeouts.PolledTimeout."""
if not self.remote_id:
raise usb_exceptions.AdbStreamClosedError(
'Cannot write() to half-opened %s', self)
if self.closed_state != self.ClosedState.OPEN:
raise usb_exceptions.AdbStreamClosedError(
'Cannot write() to closed %s', self)
elif self._expecting_okay:
raise usb_exceptions.AdbProtocolError(
'Previous WRTE failed, %s in unknown state', self)
# Make sure we only have one WRTE in flight at a time, because ADB doesn't
# identify which WRTE it is ACK'ing when it sends the OKAY message back.
with self._write_lock:
self._expecting_okay = True
self._send_command('WRTE', timeout, data)
self._read_messages_until_true(lambda: not self._expecting_okay, timeout) | python | def write(self, data, timeout):
"""Write data to this stream, using the given timeouts.PolledTimeout."""
if not self.remote_id:
raise usb_exceptions.AdbStreamClosedError(
'Cannot write() to half-opened %s', self)
if self.closed_state != self.ClosedState.OPEN:
raise usb_exceptions.AdbStreamClosedError(
'Cannot write() to closed %s', self)
elif self._expecting_okay:
raise usb_exceptions.AdbProtocolError(
'Previous WRTE failed, %s in unknown state', self)
# Make sure we only have one WRTE in flight at a time, because ADB doesn't
# identify which WRTE it is ACK'ing when it sends the OKAY message back.
with self._write_lock:
self._expecting_okay = True
self._send_command('WRTE', timeout, data)
self._read_messages_until_true(lambda: not self._expecting_okay, timeout) | [
"def",
"write",
"(",
"self",
",",
"data",
",",
"timeout",
")",
":",
"if",
"not",
"self",
".",
"remote_id",
":",
"raise",
"usb_exceptions",
".",
"AdbStreamClosedError",
"(",
"'Cannot write() to half-opened %s'",
",",
"self",
")",
"if",
"self",
".",
"closed_stat... | Write data to this stream, using the given timeouts.PolledTimeout. | [
"Write",
"data",
"to",
"this",
"stream",
"using",
"the",
"given",
"timeouts",
".",
"PolledTimeout",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L457-L474 | train | 221,801 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbStreamTransport.read | def read(self, length, timeout):
"""Read 'length' bytes from this stream transport.
Args:
length: If not 0, read this many bytes from the stream, otherwise read all
available data (at least one byte).
timeout: timeouts.PolledTimeout to use for this read operation.
Returns:
The bytes read from this stream.
"""
self._read_messages_until_true(
lambda: self._buffer_size and self._buffer_size >= length, timeout)
with self._read_buffer_lock:
data, push_back = ''.join(self._read_buffer), ''
if length:
data, push_back = data[:length], data[length:]
self._read_buffer.clear()
self._buffer_size = len(push_back)
if push_back:
self._read_buffer.appendleft(push_back)
return data | python | def read(self, length, timeout):
"""Read 'length' bytes from this stream transport.
Args:
length: If not 0, read this many bytes from the stream, otherwise read all
available data (at least one byte).
timeout: timeouts.PolledTimeout to use for this read operation.
Returns:
The bytes read from this stream.
"""
self._read_messages_until_true(
lambda: self._buffer_size and self._buffer_size >= length, timeout)
with self._read_buffer_lock:
data, push_back = ''.join(self._read_buffer), ''
if length:
data, push_back = data[:length], data[length:]
self._read_buffer.clear()
self._buffer_size = len(push_back)
if push_back:
self._read_buffer.appendleft(push_back)
return data | [
"def",
"read",
"(",
"self",
",",
"length",
",",
"timeout",
")",
":",
"self",
".",
"_read_messages_until_true",
"(",
"lambda",
":",
"self",
".",
"_buffer_size",
"and",
"self",
".",
"_buffer_size",
">=",
"length",
",",
"timeout",
")",
"with",
"self",
".",
... | Read 'length' bytes from this stream transport.
Args:
length: If not 0, read this many bytes from the stream, otherwise read all
available data (at least one byte).
timeout: timeouts.PolledTimeout to use for this read operation.
Returns:
The bytes read from this stream. | [
"Read",
"length",
"bytes",
"from",
"this",
"stream",
"transport",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L476-L498 | train | 221,802 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbConnection._make_stream_transport | def _make_stream_transport(self):
"""Create an AdbStreamTransport with a newly allocated local_id."""
msg_queue = queue.Queue()
with self._stream_transport_map_lock:
# Start one past the last id we used, and grab the first available one.
# This mimics the ADB behavior of 'increment an unsigned and let it
# overflow', but with a check to ensure we don't reuse an id in use,
# even though that's unlikely with 2^32 - 1 of them available. We try
# at most 64 id's, if we've wrapped around and there isn't one available
# in the first 64, there's a problem, better to fail fast than hang for
# a potentially very long time (STREAM_ID_LIMIT can be very large).
self._last_id_used = (self._last_id_used % STREAM_ID_LIMIT) + 1
for local_id in itertools.islice(
itertools.chain(
range(self._last_id_used, STREAM_ID_LIMIT),
range(1, self._last_id_used)), 64):
if local_id not in list(self._stream_transport_map.keys()):
self._last_id_used = local_id
break
else:
raise usb_exceptions.AdbStreamUnavailableError('Ran out of local ids!')
# Ignore this warning - the for loop will always have at least one
# iteration, so local_id will always be set.
# pylint: disable=undefined-loop-variable
stream_transport = AdbStreamTransport(self, local_id, msg_queue)
self._stream_transport_map[local_id] = stream_transport
return stream_transport | python | def _make_stream_transport(self):
"""Create an AdbStreamTransport with a newly allocated local_id."""
msg_queue = queue.Queue()
with self._stream_transport_map_lock:
# Start one past the last id we used, and grab the first available one.
# This mimics the ADB behavior of 'increment an unsigned and let it
# overflow', but with a check to ensure we don't reuse an id in use,
# even though that's unlikely with 2^32 - 1 of them available. We try
# at most 64 id's, if we've wrapped around and there isn't one available
# in the first 64, there's a problem, better to fail fast than hang for
# a potentially very long time (STREAM_ID_LIMIT can be very large).
self._last_id_used = (self._last_id_used % STREAM_ID_LIMIT) + 1
for local_id in itertools.islice(
itertools.chain(
range(self._last_id_used, STREAM_ID_LIMIT),
range(1, self._last_id_used)), 64):
if local_id not in list(self._stream_transport_map.keys()):
self._last_id_used = local_id
break
else:
raise usb_exceptions.AdbStreamUnavailableError('Ran out of local ids!')
# Ignore this warning - the for loop will always have at least one
# iteration, so local_id will always be set.
# pylint: disable=undefined-loop-variable
stream_transport = AdbStreamTransport(self, local_id, msg_queue)
self._stream_transport_map[local_id] = stream_transport
return stream_transport | [
"def",
"_make_stream_transport",
"(",
"self",
")",
":",
"msg_queue",
"=",
"queue",
".",
"Queue",
"(",
")",
"with",
"self",
".",
"_stream_transport_map_lock",
":",
"# Start one past the last id we used, and grab the first available one.",
"# This mimics the ADB behavior of 'incr... | Create an AdbStreamTransport with a newly allocated local_id. | [
"Create",
"an",
"AdbStreamTransport",
"with",
"a",
"newly",
"allocated",
"local_id",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L568-L594 | train | 221,803 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbConnection._handle_message_for_stream | def _handle_message_for_stream(self, stream_transport, message, timeout):
"""Handle an incoming message, check if it's for the given stream.
If the message is not for the stream, then add it to the appropriate
message queue.
Args:
stream_transport: AdbStreamTransport currently waiting on a message.
message: Message to check and handle.
timeout: Timeout to use for the operation, should be an instance of
timeouts.PolledTimeout.
Returns:
The message read if it was for this stream, None otherwise.
Raises:
AdbProtocolError: If we receive an unexpected message type.
"""
if message.command not in ('OKAY', 'CLSE', 'WRTE'):
raise usb_exceptions.AdbProtocolError(
'%s received unexpected message: %s', self, message)
if message.arg1 == stream_transport.local_id:
# Ack writes immediately.
if message.command == 'WRTE':
# Make sure we don't get a WRTE before an OKAY/CLSE message.
if not stream_transport.remote_id:
raise usb_exceptions.AdbProtocolError(
'%s received WRTE before OKAY/CLSE: %s',
stream_transport, message)
self.transport.write_message(adb_message.AdbMessage(
'OKAY', stream_transport.local_id, stream_transport.remote_id),
timeout)
elif message.command == 'CLSE':
self.close_stream_transport(stream_transport, timeout)
return message
else:
# Message was not for this stream, add it to the right stream's queue.
with self._stream_transport_map_lock:
dest_transport = self._stream_transport_map.get(message.arg1)
if dest_transport:
if message.command == 'CLSE':
self.close_stream_transport(dest_transport, timeout)
dest_transport.enqueue_message(message, timeout)
else:
_LOG.warning('Received message for unknown local-id: %s', message) | python | def _handle_message_for_stream(self, stream_transport, message, timeout):
"""Handle an incoming message, check if it's for the given stream.
If the message is not for the stream, then add it to the appropriate
message queue.
Args:
stream_transport: AdbStreamTransport currently waiting on a message.
message: Message to check and handle.
timeout: Timeout to use for the operation, should be an instance of
timeouts.PolledTimeout.
Returns:
The message read if it was for this stream, None otherwise.
Raises:
AdbProtocolError: If we receive an unexpected message type.
"""
if message.command not in ('OKAY', 'CLSE', 'WRTE'):
raise usb_exceptions.AdbProtocolError(
'%s received unexpected message: %s', self, message)
if message.arg1 == stream_transport.local_id:
# Ack writes immediately.
if message.command == 'WRTE':
# Make sure we don't get a WRTE before an OKAY/CLSE message.
if not stream_transport.remote_id:
raise usb_exceptions.AdbProtocolError(
'%s received WRTE before OKAY/CLSE: %s',
stream_transport, message)
self.transport.write_message(adb_message.AdbMessage(
'OKAY', stream_transport.local_id, stream_transport.remote_id),
timeout)
elif message.command == 'CLSE':
self.close_stream_transport(stream_transport, timeout)
return message
else:
# Message was not for this stream, add it to the right stream's queue.
with self._stream_transport_map_lock:
dest_transport = self._stream_transport_map.get(message.arg1)
if dest_transport:
if message.command == 'CLSE':
self.close_stream_transport(dest_transport, timeout)
dest_transport.enqueue_message(message, timeout)
else:
_LOG.warning('Received message for unknown local-id: %s', message) | [
"def",
"_handle_message_for_stream",
"(",
"self",
",",
"stream_transport",
",",
"message",
",",
"timeout",
")",
":",
"if",
"message",
".",
"command",
"not",
"in",
"(",
"'OKAY'",
",",
"'CLSE'",
",",
"'WRTE'",
")",
":",
"raise",
"usb_exceptions",
".",
"AdbProt... | Handle an incoming message, check if it's for the given stream.
If the message is not for the stream, then add it to the appropriate
message queue.
Args:
stream_transport: AdbStreamTransport currently waiting on a message.
message: Message to check and handle.
timeout: Timeout to use for the operation, should be an instance of
timeouts.PolledTimeout.
Returns:
The message read if it was for this stream, None otherwise.
Raises:
AdbProtocolError: If we receive an unexpected message type. | [
"Handle",
"an",
"incoming",
"message",
"check",
"if",
"it",
"s",
"for",
"the",
"given",
"stream",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L596-L642 | train | 221,804 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbConnection.open_stream | def open_stream(self, destination, timeout_ms=None):
"""Opens a new stream to a destination service on the device.
Not the same as the posix 'open' or any other Open methods, this
corresponds to the OPEN message described in the ADB protocol
documentation mentioned above. It creates a stream (uniquely identified
by remote/local ids) that connects to a particular service endpoint.
Args:
destination: The service:command string, see ADB documentation.
timeout_ms: Timeout in milliseconds for the Open to succeed (or as a
PolledTimeout object).
Raises:
AdbProtocolError: Wrong local_id sent to us, or we didn't get a ready
response.
Returns:
An AdbStream object that can be used to read/write data to the specified
service endpoint, or None if the requested service couldn't be opened.
"""
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
stream_transport = self._make_stream_transport()
self.transport.write_message(
adb_message.AdbMessage(
command='OPEN',
arg0=stream_transport.local_id, arg1=0,
data=destination + '\0'),
timeout)
if not stream_transport.ensure_opened(timeout):
return None
return AdbStream(destination, stream_transport) | python | def open_stream(self, destination, timeout_ms=None):
"""Opens a new stream to a destination service on the device.
Not the same as the posix 'open' or any other Open methods, this
corresponds to the OPEN message described in the ADB protocol
documentation mentioned above. It creates a stream (uniquely identified
by remote/local ids) that connects to a particular service endpoint.
Args:
destination: The service:command string, see ADB documentation.
timeout_ms: Timeout in milliseconds for the Open to succeed (or as a
PolledTimeout object).
Raises:
AdbProtocolError: Wrong local_id sent to us, or we didn't get a ready
response.
Returns:
An AdbStream object that can be used to read/write data to the specified
service endpoint, or None if the requested service couldn't be opened.
"""
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
stream_transport = self._make_stream_transport()
self.transport.write_message(
adb_message.AdbMessage(
command='OPEN',
arg0=stream_transport.local_id, arg1=0,
data=destination + '\0'),
timeout)
if not stream_transport.ensure_opened(timeout):
return None
return AdbStream(destination, stream_transport) | [
"def",
"open_stream",
"(",
"self",
",",
"destination",
",",
"timeout_ms",
"=",
"None",
")",
":",
"timeout",
"=",
"timeouts",
".",
"PolledTimeout",
".",
"from_millis",
"(",
"timeout_ms",
")",
"stream_transport",
"=",
"self",
".",
"_make_stream_transport",
"(",
... | Opens a new stream to a destination service on the device.
Not the same as the posix 'open' or any other Open methods, this
corresponds to the OPEN message described in the ADB protocol
documentation mentioned above. It creates a stream (uniquely identified
by remote/local ids) that connects to a particular service endpoint.
Args:
destination: The service:command string, see ADB documentation.
timeout_ms: Timeout in milliseconds for the Open to succeed (or as a
PolledTimeout object).
Raises:
AdbProtocolError: Wrong local_id sent to us, or we didn't get a ready
response.
Returns:
An AdbStream object that can be used to read/write data to the specified
service endpoint, or None if the requested service couldn't be opened. | [
"Opens",
"a",
"new",
"stream",
"to",
"a",
"destination",
"service",
"on",
"the",
"device",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L648-L680 | train | 221,805 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbConnection.close_stream_transport | def close_stream_transport(self, stream_transport, timeout):
"""Remove the given stream transport's id from our map of id's.
If the stream id is actually removed, we send a CLSE message to let the
remote end know (this happens when we are ack'ing a CLSE message we
received). The ADB protocol doesn't say this is a requirement, but ADB
does it, so we do too.
Args:
stream_transport: The stream transport to close.
timeout: Timeout on the operation.
Returns:
True if the id was removed and message sent, False if it was already
missing from the stream map (already closed).
"""
with self._stream_transport_map_lock:
if stream_transport.local_id in self._stream_transport_map:
del self._stream_transport_map[stream_transport.local_id]
# If we never got a remote_id, there's no CLSE message to send.
if stream_transport.remote_id:
self.transport.write_message(adb_message.AdbMessage(
'CLSE', stream_transport.local_id, stream_transport.remote_id),
timeout)
return True
return False | python | def close_stream_transport(self, stream_transport, timeout):
"""Remove the given stream transport's id from our map of id's.
If the stream id is actually removed, we send a CLSE message to let the
remote end know (this happens when we are ack'ing a CLSE message we
received). The ADB protocol doesn't say this is a requirement, but ADB
does it, so we do too.
Args:
stream_transport: The stream transport to close.
timeout: Timeout on the operation.
Returns:
True if the id was removed and message sent, False if it was already
missing from the stream map (already closed).
"""
with self._stream_transport_map_lock:
if stream_transport.local_id in self._stream_transport_map:
del self._stream_transport_map[stream_transport.local_id]
# If we never got a remote_id, there's no CLSE message to send.
if stream_transport.remote_id:
self.transport.write_message(adb_message.AdbMessage(
'CLSE', stream_transport.local_id, stream_transport.remote_id),
timeout)
return True
return False | [
"def",
"close_stream_transport",
"(",
"self",
",",
"stream_transport",
",",
"timeout",
")",
":",
"with",
"self",
".",
"_stream_transport_map_lock",
":",
"if",
"stream_transport",
".",
"local_id",
"in",
"self",
".",
"_stream_transport_map",
":",
"del",
"self",
".",... | Remove the given stream transport's id from our map of id's.
If the stream id is actually removed, we send a CLSE message to let the
remote end know (this happens when we are ack'ing a CLSE message we
received). The ADB protocol doesn't say this is a requirement, but ADB
does it, so we do too.
Args:
stream_transport: The stream transport to close.
timeout: Timeout on the operation.
Returns:
True if the id was removed and message sent, False if it was already
missing from the stream map (already closed). | [
"Remove",
"the",
"given",
"stream",
"transport",
"s",
"id",
"from",
"our",
"map",
"of",
"id",
"s",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L682-L707 | train | 221,806 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbConnection.streaming_command | def streaming_command(self, service, command='', timeout_ms=None):
"""One complete set of packets for a single command.
Helper function to call open_stream and yield the output. Sends
service:command in a new connection, reading the data for the response. All
the data is held in memory, large responses will be slow and can fill up
memory.
Args:
service: The service on the device to talk to.
command: The command to send to the service.
timeout_ms: Timeout for the entire command, in milliseconds (or as a
PolledTimeout object).
Yields:
The data contained in the responses from the service.
"""
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
stream = self.open_stream('%s:%s' % (service, command), timeout)
if not stream:
raise usb_exceptions.AdbStreamUnavailableError(
'%s does not support service: %s', self, service)
for data in stream.read_until_close(timeout):
yield data | python | def streaming_command(self, service, command='', timeout_ms=None):
"""One complete set of packets for a single command.
Helper function to call open_stream and yield the output. Sends
service:command in a new connection, reading the data for the response. All
the data is held in memory, large responses will be slow and can fill up
memory.
Args:
service: The service on the device to talk to.
command: The command to send to the service.
timeout_ms: Timeout for the entire command, in milliseconds (or as a
PolledTimeout object).
Yields:
The data contained in the responses from the service.
"""
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
stream = self.open_stream('%s:%s' % (service, command), timeout)
if not stream:
raise usb_exceptions.AdbStreamUnavailableError(
'%s does not support service: %s', self, service)
for data in stream.read_until_close(timeout):
yield data | [
"def",
"streaming_command",
"(",
"self",
",",
"service",
",",
"command",
"=",
"''",
",",
"timeout_ms",
"=",
"None",
")",
":",
"timeout",
"=",
"timeouts",
".",
"PolledTimeout",
".",
"from_millis",
"(",
"timeout_ms",
")",
"stream",
"=",
"self",
".",
"open_st... | One complete set of packets for a single command.
Helper function to call open_stream and yield the output. Sends
service:command in a new connection, reading the data for the response. All
the data is held in memory, large responses will be slow and can fill up
memory.
Args:
service: The service on the device to talk to.
command: The command to send to the service.
timeout_ms: Timeout for the entire command, in milliseconds (or as a
PolledTimeout object).
Yields:
The data contained in the responses from the service. | [
"One",
"complete",
"set",
"of",
"packets",
"for",
"a",
"single",
"command",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L709-L732 | train | 221,807 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbConnection.read_for_stream | def read_for_stream(self, stream_transport, timeout_ms=None):
"""Attempt to read a packet for the given stream transport.
Will read packets from self.transport until one intended for the given
AdbStream is found. If another thread is already reading packets, this will
block until that thread reads a packet for this stream, or timeout expires.
Note that this method always returns None, but if a packet was read for the
given stream, it will have been added to that stream's message queue.
This is somewhat tricky to do - first we check if there's a message already
in our queue. If not, then we try to use our AdbConnection to read a
message for this stream.
If some other thread is already doing reads, then read_for_stream() will sit
in a tight loop, with a short delay, checking our message queue for a
message from the other thread.
Note that we must pass the queue in from the AdbStream, rather than looking
it up in the AdbConnection's map, because the AdbConnection may have
removed the queue from its map (while it still had messages in it). The
AdbStream itself maintains a reference to the queue to avoid dropping those
messages.
The AdbMessage read is guaranteed to be one of 'OKAY', 'WRTE', or 'CLSE'.
If it was a WRTE message, then it will have been automatically ACK'd with an
OKAY message, if it was a CLSE message it will have been ACK'd with a
corresponding CLSE message, and this AdbStream will be marked as closed.
Args:
stream_transport: The AdbStreamTransport for the stream that is reading
an AdbMessage from this AdbConnection.
timeout_ms: If provided, timeout, in milliseconds, to use. Note this
timeout applies to this entire call, not for each individual Read, since
there may be multiple reads if messages for other streams are read.
This argument may be a timeouts.PolledTimeout.
Returns:
AdbMessage that was read, guaranteed to be one of 'OKAY', 'CLSE', or
'WRTE' command.
Raises:
AdbTimeoutError: If we don't get a packet for this stream before
timeout expires.
AdbStreamClosedError: If the given stream has been closed.
"""
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
# Bail when the timeout expires, or when we no longer have the given stream
# in our map (it may have been closed, we don't want to leave a thread
# hanging in this loop when that happens).
while (not timeout.has_expired() and
stream_transport.local_id in self._stream_transport_map):
try:
# Block for up to 10ms to rate-limit how fast we spin.
return stream_transport.message_queue.get(True, .01)
except queue.Empty:
pass
# If someone else has the Lock, just keep checking our queue.
if not self._reader_lock.acquire(False):
continue
try:
# Now that we've acquired the Lock, we have to check the queue again,
# just in case someone had the Lock but hadn't yet added our message
# to the queue when we checked the first time. Now that we have the
# Lock ourselves, we're sure there are no potentially in-flight reads.
try:
return stream_transport.message_queue.get_nowait()
except queue.Empty:
pass
while not timeout.has_expired():
msg = self._handle_message_for_stream(
stream_transport, self.transport.read_message(timeout), timeout)
if msg:
return msg
finally:
self._reader_lock.release()
if timeout.has_expired():
raise usb_exceptions.AdbTimeoutError(
'Read timed out for %s', stream_transport)
# The stream is no longer in the map, so it's closed, but check for any
# queued messages.
try:
return stream_transport.message_queue.get_nowait()
except queue.Empty:
raise usb_exceptions.AdbStreamClosedError(
'Attempt to read from closed or unknown %s', stream_transport) | python | def read_for_stream(self, stream_transport, timeout_ms=None):
"""Attempt to read a packet for the given stream transport.
Will read packets from self.transport until one intended for the given
AdbStream is found. If another thread is already reading packets, this will
block until that thread reads a packet for this stream, or timeout expires.
Note that this method always returns None, but if a packet was read for the
given stream, it will have been added to that stream's message queue.
This is somewhat tricky to do - first we check if there's a message already
in our queue. If not, then we try to use our AdbConnection to read a
message for this stream.
If some other thread is already doing reads, then read_for_stream() will sit
in a tight loop, with a short delay, checking our message queue for a
message from the other thread.
Note that we must pass the queue in from the AdbStream, rather than looking
it up in the AdbConnection's map, because the AdbConnection may have
removed the queue from its map (while it still had messages in it). The
AdbStream itself maintains a reference to the queue to avoid dropping those
messages.
The AdbMessage read is guaranteed to be one of 'OKAY', 'WRTE', or 'CLSE'.
If it was a WRTE message, then it will have been automatically ACK'd with an
OKAY message, if it was a CLSE message it will have been ACK'd with a
corresponding CLSE message, and this AdbStream will be marked as closed.
Args:
stream_transport: The AdbStreamTransport for the stream that is reading
an AdbMessage from this AdbConnection.
timeout_ms: If provided, timeout, in milliseconds, to use. Note this
timeout applies to this entire call, not for each individual Read, since
there may be multiple reads if messages for other streams are read.
This argument may be a timeouts.PolledTimeout.
Returns:
AdbMessage that was read, guaranteed to be one of 'OKAY', 'CLSE', or
'WRTE' command.
Raises:
AdbTimeoutError: If we don't get a packet for this stream before
timeout expires.
AdbStreamClosedError: If the given stream has been closed.
"""
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
# Bail when the timeout expires, or when we no longer have the given stream
# in our map (it may have been closed, we don't want to leave a thread
# hanging in this loop when that happens).
while (not timeout.has_expired() and
stream_transport.local_id in self._stream_transport_map):
try:
# Block for up to 10ms to rate-limit how fast we spin.
return stream_transport.message_queue.get(True, .01)
except queue.Empty:
pass
# If someone else has the Lock, just keep checking our queue.
if not self._reader_lock.acquire(False):
continue
try:
# Now that we've acquired the Lock, we have to check the queue again,
# just in case someone had the Lock but hadn't yet added our message
# to the queue when we checked the first time. Now that we have the
# Lock ourselves, we're sure there are no potentially in-flight reads.
try:
return stream_transport.message_queue.get_nowait()
except queue.Empty:
pass
while not timeout.has_expired():
msg = self._handle_message_for_stream(
stream_transport, self.transport.read_message(timeout), timeout)
if msg:
return msg
finally:
self._reader_lock.release()
if timeout.has_expired():
raise usb_exceptions.AdbTimeoutError(
'Read timed out for %s', stream_transport)
# The stream is no longer in the map, so it's closed, but check for any
# queued messages.
try:
return stream_transport.message_queue.get_nowait()
except queue.Empty:
raise usb_exceptions.AdbStreamClosedError(
'Attempt to read from closed or unknown %s', stream_transport) | [
"def",
"read_for_stream",
"(",
"self",
",",
"stream_transport",
",",
"timeout_ms",
"=",
"None",
")",
":",
"timeout",
"=",
"timeouts",
".",
"PolledTimeout",
".",
"from_millis",
"(",
"timeout_ms",
")",
"# Bail when the timeout expires, or when we no longer have the given st... | Attempt to read a packet for the given stream transport.
Will read packets from self.transport until one intended for the given
AdbStream is found. If another thread is already reading packets, this will
block until that thread reads a packet for this stream, or timeout expires.
Note that this method always returns None, but if a packet was read for the
given stream, it will have been added to that stream's message queue.
This is somewhat tricky to do - first we check if there's a message already
in our queue. If not, then we try to use our AdbConnection to read a
message for this stream.
If some other thread is already doing reads, then read_for_stream() will sit
in a tight loop, with a short delay, checking our message queue for a
message from the other thread.
Note that we must pass the queue in from the AdbStream, rather than looking
it up in the AdbConnection's map, because the AdbConnection may have
removed the queue from its map (while it still had messages in it). The
AdbStream itself maintains a reference to the queue to avoid dropping those
messages.
The AdbMessage read is guaranteed to be one of 'OKAY', 'WRTE', or 'CLSE'.
If it was a WRTE message, then it will have been automatically ACK'd with an
OKAY message, if it was a CLSE message it will have been ACK'd with a
corresponding CLSE message, and this AdbStream will be marked as closed.
Args:
stream_transport: The AdbStreamTransport for the stream that is reading
an AdbMessage from this AdbConnection.
timeout_ms: If provided, timeout, in milliseconds, to use. Note this
timeout applies to this entire call, not for each individual Read, since
there may be multiple reads if messages for other streams are read.
This argument may be a timeouts.PolledTimeout.
Returns:
AdbMessage that was read, guaranteed to be one of 'OKAY', 'CLSE', or
'WRTE' command.
Raises:
AdbTimeoutError: If we don't get a packet for this stream before
timeout expires.
AdbStreamClosedError: If the given stream has been closed. | [
"Attempt",
"to",
"read",
"a",
"packet",
"for",
"the",
"given",
"stream",
"transport",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L734-L823 | train | 221,808 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbConnection.connect | def connect(cls, transport, rsa_keys=None, timeout_ms=1000,
auth_timeout_ms=100):
"""Establish a new connection to a device, connected via transport.
Args:
transport: A transport to use for reads/writes from/to the device,
usually an instance of UsbHandle, but really it can be anything with
read() and write() methods.
rsa_keys: List of AuthSigner subclass instances to be used for
authentication. The device can either accept one of these via the sign
method, or we will send the result of get_public_key from the first one
if the device doesn't accept any of them.
timeout_ms: Timeout to wait for the device to respond to our CNXN
request. Actual timeout may take longer if the transport object passed
has a longer default timeout than timeout_ms, or if auth_timeout_ms is
longer than timeout_ms and public key auth is used. This argument may
be a PolledTimeout object.
auth_timeout_ms: Timeout to wait for when sending a new public key. This
is only relevant when we send a new public key. The device shows a
dialog and this timeout is how long to wait for that dialog. If used
in automation, this should be low to catch such a case as a failure
quickly; while in interactive settings it should be high to allow
users to accept the dialog. We default to automation here, so it's low
by default. This argument may be a PolledTimeout object.
Returns:
An instance of AdbConnection that is connected to the device.
Raises:
usb_exceptions.DeviceAuthError: When the device expects authentication,
but we weren't given any valid keys.
usb_exceptions.AdbProtocolError: When the device does authentication in an
unexpected way, or fails to respond appropriately to our CNXN request.
"""
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
if ADB_MESSAGE_LOG:
adb_transport = adb_message.DebugAdbTransportAdapter(transport)
else:
adb_transport = adb_message.AdbTransportAdapter(transport)
adb_transport.write_message(
adb_message.AdbMessage(
command='CNXN', arg0=ADB_VERSION, arg1=MAX_ADB_DATA,
data='host::%s\0' % ADB_BANNER),
timeout)
msg = adb_transport.read_until(('AUTH', 'CNXN'), timeout)
if msg.command == 'CNXN':
return cls(adb_transport, msg.arg1, msg.data)
# We got an AUTH response, so we have to try to authenticate.
if not rsa_keys:
raise usb_exceptions.DeviceAuthError(
'Device authentication required, no keys available.')
# Loop through our keys, signing the last 'banner' or token.
for rsa_key in rsa_keys:
if msg.arg0 != cls.AUTH_TOKEN:
raise usb_exceptions.AdbProtocolError('Bad AUTH response: %s', msg)
signed_token = rsa_key.sign(msg.data)
adb_transport.write_message(
adb_message.AdbMessage(
command='AUTH', arg0=cls.AUTH_SIGNATURE, arg1=0,
data=signed_token),
timeout)
msg = adb_transport.read_until(('AUTH', 'CNXN'), timeout)
if msg.command == 'CNXN':
return cls(adb_transport, msg.arg1, msg.data)
# None of the keys worked, so send a public key.
adb_transport.write_message(
adb_message.AdbMessage(
command='AUTH', arg0=cls.AUTH_RSAPUBLICKEY, arg1=0,
data=rsa_keys[0].get_public_key() + '\0'),
timeout)
try:
msg = adb_transport.read_until(
('CNXN',), timeouts.PolledTimeout.from_millis(auth_timeout_ms))
except usb_exceptions.UsbReadFailedError as exception:
if exception.is_timeout():
exceptions.reraise(usb_exceptions.DeviceAuthError,
'Accept auth key on device, then retry.')
raise
# The read didn't time-out, so we got a CNXN response.
return cls(adb_transport, msg.arg1, msg.data) | python | def connect(cls, transport, rsa_keys=None, timeout_ms=1000,
auth_timeout_ms=100):
"""Establish a new connection to a device, connected via transport.
Args:
transport: A transport to use for reads/writes from/to the device,
usually an instance of UsbHandle, but really it can be anything with
read() and write() methods.
rsa_keys: List of AuthSigner subclass instances to be used for
authentication. The device can either accept one of these via the sign
method, or we will send the result of get_public_key from the first one
if the device doesn't accept any of them.
timeout_ms: Timeout to wait for the device to respond to our CNXN
request. Actual timeout may take longer if the transport object passed
has a longer default timeout than timeout_ms, or if auth_timeout_ms is
longer than timeout_ms and public key auth is used. This argument may
be a PolledTimeout object.
auth_timeout_ms: Timeout to wait for when sending a new public key. This
is only relevant when we send a new public key. The device shows a
dialog and this timeout is how long to wait for that dialog. If used
in automation, this should be low to catch such a case as a failure
quickly; while in interactive settings it should be high to allow
users to accept the dialog. We default to automation here, so it's low
by default. This argument may be a PolledTimeout object.
Returns:
An instance of AdbConnection that is connected to the device.
Raises:
usb_exceptions.DeviceAuthError: When the device expects authentication,
but we weren't given any valid keys.
usb_exceptions.AdbProtocolError: When the device does authentication in an
unexpected way, or fails to respond appropriately to our CNXN request.
"""
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
if ADB_MESSAGE_LOG:
adb_transport = adb_message.DebugAdbTransportAdapter(transport)
else:
adb_transport = adb_message.AdbTransportAdapter(transport)
adb_transport.write_message(
adb_message.AdbMessage(
command='CNXN', arg0=ADB_VERSION, arg1=MAX_ADB_DATA,
data='host::%s\0' % ADB_BANNER),
timeout)
msg = adb_transport.read_until(('AUTH', 'CNXN'), timeout)
if msg.command == 'CNXN':
return cls(adb_transport, msg.arg1, msg.data)
# We got an AUTH response, so we have to try to authenticate.
if not rsa_keys:
raise usb_exceptions.DeviceAuthError(
'Device authentication required, no keys available.')
# Loop through our keys, signing the last 'banner' or token.
for rsa_key in rsa_keys:
if msg.arg0 != cls.AUTH_TOKEN:
raise usb_exceptions.AdbProtocolError('Bad AUTH response: %s', msg)
signed_token = rsa_key.sign(msg.data)
adb_transport.write_message(
adb_message.AdbMessage(
command='AUTH', arg0=cls.AUTH_SIGNATURE, arg1=0,
data=signed_token),
timeout)
msg = adb_transport.read_until(('AUTH', 'CNXN'), timeout)
if msg.command == 'CNXN':
return cls(adb_transport, msg.arg1, msg.data)
# None of the keys worked, so send a public key.
adb_transport.write_message(
adb_message.AdbMessage(
command='AUTH', arg0=cls.AUTH_RSAPUBLICKEY, arg1=0,
data=rsa_keys[0].get_public_key() + '\0'),
timeout)
try:
msg = adb_transport.read_until(
('CNXN',), timeouts.PolledTimeout.from_millis(auth_timeout_ms))
except usb_exceptions.UsbReadFailedError as exception:
if exception.is_timeout():
exceptions.reraise(usb_exceptions.DeviceAuthError,
'Accept auth key on device, then retry.')
raise
# The read didn't time-out, so we got a CNXN response.
return cls(adb_transport, msg.arg1, msg.data) | [
"def",
"connect",
"(",
"cls",
",",
"transport",
",",
"rsa_keys",
"=",
"None",
",",
"timeout_ms",
"=",
"1000",
",",
"auth_timeout_ms",
"=",
"100",
")",
":",
"timeout",
"=",
"timeouts",
".",
"PolledTimeout",
".",
"from_millis",
"(",
"timeout_ms",
")",
"if",
... | Establish a new connection to a device, connected via transport.
Args:
transport: A transport to use for reads/writes from/to the device,
usually an instance of UsbHandle, but really it can be anything with
read() and write() methods.
rsa_keys: List of AuthSigner subclass instances to be used for
authentication. The device can either accept one of these via the sign
method, or we will send the result of get_public_key from the first one
if the device doesn't accept any of them.
timeout_ms: Timeout to wait for the device to respond to our CNXN
request. Actual timeout may take longer if the transport object passed
has a longer default timeout than timeout_ms, or if auth_timeout_ms is
longer than timeout_ms and public key auth is used. This argument may
be a PolledTimeout object.
auth_timeout_ms: Timeout to wait for when sending a new public key. This
is only relevant when we send a new public key. The device shows a
dialog and this timeout is how long to wait for that dialog. If used
in automation, this should be low to catch such a case as a failure
quickly; while in interactive settings it should be high to allow
users to accept the dialog. We default to automation here, so it's low
by default. This argument may be a PolledTimeout object.
Returns:
An instance of AdbConnection that is connected to the device.
Raises:
usb_exceptions.DeviceAuthError: When the device expects authentication,
but we weren't given any valid keys.
usb_exceptions.AdbProtocolError: When the device does authentication in an
unexpected way, or fails to respond appropriately to our CNXN request. | [
"Establish",
"a",
"new",
"connection",
"to",
"a",
"device",
"connected",
"via",
"transport",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L826-L912 | train | 221,809 |
google/openhtf | examples/example_plugs.py | ExamplePlug.increment | def increment(self):
"""Increment our value, return the previous value."""
self.value += self.increment_size
return self.value - self.increment_size | python | def increment(self):
"""Increment our value, return the previous value."""
self.value += self.increment_size
return self.value - self.increment_size | [
"def",
"increment",
"(",
"self",
")",
":",
"self",
".",
"value",
"+=",
"self",
".",
"increment_size",
"return",
"self",
".",
"value",
"-",
"self",
".",
"increment_size"
] | Increment our value, return the previous value. | [
"Increment",
"our",
"value",
"return",
"the",
"previous",
"value",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/examples/example_plugs.py#L76-L79 | train | 221,810 |
google/openhtf | openhtf/plugs/__init__.py | plug | def plug(update_kwargs=True, **plugs_map):
"""Creates a decorator that passes in plugs when invoked.
This function returns a decorator for a function that will replace positional
arguments to that function with the plugs specified. See the module
docstring for details and examples.
Note this decorator does not work with class or bound methods, but does work
with @staticmethod.
Args:
update_kwargs: If true, makes the decorated phase take this plug as a kwarg.
**plugs_map: Dict mapping name to Plug type.
Returns:
A PhaseDescriptor that will pass plug instances in as kwargs when invoked.
Raises:
InvalidPlugError: If a type is provided that is not a subclass of BasePlug.
"""
for a_plug in plugs_map.values():
if not (isinstance(a_plug, PlugPlaceholder)
or issubclass(a_plug, BasePlug)):
raise InvalidPlugError(
'Plug %s is not a subclass of plugs.BasePlug nor a placeholder '
'for one' % a_plug)
def result(func):
"""Wrap the given function and return the wrapper.
Args:
func: The function to wrap.
Returns:
A PhaseDescriptor that, when called will invoke the wrapped function,
passing plugs as keyword args.
Raises:
DuplicatePlugError: If a plug name is declared twice for the
same function.
"""
phase = openhtf.core.phase_descriptor.PhaseDescriptor.wrap_or_copy(func)
duplicates = (frozenset(p.name for p in phase.plugs) &
frozenset(plugs_map))
if duplicates:
raise DuplicatePlugError(
'Plugs %s required multiple times on phase %s' % (duplicates, func))
phase.plugs.extend([
PhasePlug(name, a_plug, update_kwargs=update_kwargs)
for name, a_plug in six.iteritems(plugs_map)])
return phase
return result | python | def plug(update_kwargs=True, **plugs_map):
"""Creates a decorator that passes in plugs when invoked.
This function returns a decorator for a function that will replace positional
arguments to that function with the plugs specified. See the module
docstring for details and examples.
Note this decorator does not work with class or bound methods, but does work
with @staticmethod.
Args:
update_kwargs: If true, makes the decorated phase take this plug as a kwarg.
**plugs_map: Dict mapping name to Plug type.
Returns:
A PhaseDescriptor that will pass plug instances in as kwargs when invoked.
Raises:
InvalidPlugError: If a type is provided that is not a subclass of BasePlug.
"""
for a_plug in plugs_map.values():
if not (isinstance(a_plug, PlugPlaceholder)
or issubclass(a_plug, BasePlug)):
raise InvalidPlugError(
'Plug %s is not a subclass of plugs.BasePlug nor a placeholder '
'for one' % a_plug)
def result(func):
"""Wrap the given function and return the wrapper.
Args:
func: The function to wrap.
Returns:
A PhaseDescriptor that, when called will invoke the wrapped function,
passing plugs as keyword args.
Raises:
DuplicatePlugError: If a plug name is declared twice for the
same function.
"""
phase = openhtf.core.phase_descriptor.PhaseDescriptor.wrap_or_copy(func)
duplicates = (frozenset(p.name for p in phase.plugs) &
frozenset(plugs_map))
if duplicates:
raise DuplicatePlugError(
'Plugs %s required multiple times on phase %s' % (duplicates, func))
phase.plugs.extend([
PhasePlug(name, a_plug, update_kwargs=update_kwargs)
for name, a_plug in six.iteritems(plugs_map)])
return phase
return result | [
"def",
"plug",
"(",
"update_kwargs",
"=",
"True",
",",
"*",
"*",
"plugs_map",
")",
":",
"for",
"a_plug",
"in",
"plugs_map",
".",
"values",
"(",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"a_plug",
",",
"PlugPlaceholder",
")",
"or",
"issubclass",
"(... | Creates a decorator that passes in plugs when invoked.
This function returns a decorator for a function that will replace positional
arguments to that function with the plugs specified. See the module
docstring for details and examples.
Note this decorator does not work with class or bound methods, but does work
with @staticmethod.
Args:
update_kwargs: If true, makes the decorated phase take this plug as a kwarg.
**plugs_map: Dict mapping name to Plug type.
Returns:
A PhaseDescriptor that will pass plug instances in as kwargs when invoked.
Raises:
InvalidPlugError: If a type is provided that is not a subclass of BasePlug. | [
"Creates",
"a",
"decorator",
"that",
"passes",
"in",
"plugs",
"when",
"invoked",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/__init__.py#L217-L269 | train | 221,811 |
google/openhtf | openhtf/plugs/__init__.py | BasePlug.uses_base_tear_down | def uses_base_tear_down(cls):
"""Checks whether the tearDown method is the BasePlug implementation."""
this_tear_down = getattr(cls, 'tearDown')
base_tear_down = getattr(BasePlug, 'tearDown')
return this_tear_down.__code__ is base_tear_down.__code__ | python | def uses_base_tear_down(cls):
"""Checks whether the tearDown method is the BasePlug implementation."""
this_tear_down = getattr(cls, 'tearDown')
base_tear_down = getattr(BasePlug, 'tearDown')
return this_tear_down.__code__ is base_tear_down.__code__ | [
"def",
"uses_base_tear_down",
"(",
"cls",
")",
":",
"this_tear_down",
"=",
"getattr",
"(",
"cls",
",",
"'tearDown'",
")",
"base_tear_down",
"=",
"getattr",
"(",
"BasePlug",
",",
"'tearDown'",
")",
"return",
"this_tear_down",
".",
"__code__",
"is",
"base_tear_dow... | Checks whether the tearDown method is the BasePlug implementation. | [
"Checks",
"whether",
"the",
"tearDown",
"method",
"is",
"the",
"BasePlug",
"implementation",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/__init__.py#L196-L200 | train | 221,812 |
google/openhtf | openhtf/plugs/__init__.py | PlugManager.get_plug_mro | def get_plug_mro(self, plug_type):
"""Returns a list of names identifying the plug classes in the plug's MRO.
For example:
['openhtf.plugs.user_input.UserInput']
Or:
['openhtf.plugs.user_input.UserInput',
'my_module.advanced_user_input.AdvancedUserInput']
"""
ignored_classes = (BasePlug, FrontendAwareBasePlug)
return [
self.get_plug_name(base_class) for base_class in plug_type.mro()
if (issubclass(base_class, BasePlug) and
base_class not in ignored_classes)
] | python | def get_plug_mro(self, plug_type):
"""Returns a list of names identifying the plug classes in the plug's MRO.
For example:
['openhtf.plugs.user_input.UserInput']
Or:
['openhtf.plugs.user_input.UserInput',
'my_module.advanced_user_input.AdvancedUserInput']
"""
ignored_classes = (BasePlug, FrontendAwareBasePlug)
return [
self.get_plug_name(base_class) for base_class in plug_type.mro()
if (issubclass(base_class, BasePlug) and
base_class not in ignored_classes)
] | [
"def",
"get_plug_mro",
"(",
"self",
",",
"plug_type",
")",
":",
"ignored_classes",
"=",
"(",
"BasePlug",
",",
"FrontendAwareBasePlug",
")",
"return",
"[",
"self",
".",
"get_plug_name",
"(",
"base_class",
")",
"for",
"base_class",
"in",
"plug_type",
".",
"mro",... | Returns a list of names identifying the plug classes in the plug's MRO.
For example:
['openhtf.plugs.user_input.UserInput']
Or:
['openhtf.plugs.user_input.UserInput',
'my_module.advanced_user_input.AdvancedUserInput'] | [
"Returns",
"a",
"list",
"of",
"names",
"identifying",
"the",
"plug",
"classes",
"in",
"the",
"plug",
"s",
"MRO",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/__init__.py#L338-L352 | train | 221,813 |
google/openhtf | openhtf/plugs/__init__.py | PlugManager.initialize_plugs | def initialize_plugs(self, plug_types=None):
"""Instantiate required plugs.
Instantiates plug types and saves the instances in self._plugs_by_type for
use in provide_plugs().
Args:
plug_types: Plug types may be specified here rather than passed
into the constructor (this is used primarily for unit testing
phases).
"""
types = plug_types if plug_types is not None else self._plug_types
for plug_type in types:
# Create a logger for this plug. All plug loggers go under the 'plug'
# sub-logger in the logger hierarchy.
plug_logger = self.logger.getChild(plug_type.__name__)
if plug_type in self._plugs_by_type:
continue
try:
if not issubclass(plug_type, BasePlug):
raise InvalidPlugError(
'Plug type "%s" is not an instance of BasePlug' % plug_type)
if plug_type.logger != _LOG:
# They put a logger attribute on the class itself, overriding ours.
raise InvalidPlugError(
'Do not override "logger" in your plugs.', plug_type)
# Override the logger so that __init__'s logging goes into the record.
plug_type.logger = plug_logger
try:
plug_instance = plug_type()
finally:
# Now set it back since we'll give the instance a logger in a moment.
plug_type.logger = _LOG
# Set the logger attribute directly (rather than in BasePlug) so we
# don't depend on subclasses' implementation of __init__ to have it
# set.
if plug_instance.logger != _LOG:
raise InvalidPlugError(
'Do not set "self.logger" in __init__ in your plugs', plug_type)
else:
# Now the instance has its own copy of the test logger.
plug_instance.logger = plug_logger
except Exception: # pylint: disable=broad-except
plug_logger.exception('Exception instantiating plug type %s', plug_type)
self.tear_down_plugs()
raise
self.update_plug(plug_type, plug_instance) | python | def initialize_plugs(self, plug_types=None):
"""Instantiate required plugs.
Instantiates plug types and saves the instances in self._plugs_by_type for
use in provide_plugs().
Args:
plug_types: Plug types may be specified here rather than passed
into the constructor (this is used primarily for unit testing
phases).
"""
types = plug_types if plug_types is not None else self._plug_types
for plug_type in types:
# Create a logger for this plug. All plug loggers go under the 'plug'
# sub-logger in the logger hierarchy.
plug_logger = self.logger.getChild(plug_type.__name__)
if plug_type in self._plugs_by_type:
continue
try:
if not issubclass(plug_type, BasePlug):
raise InvalidPlugError(
'Plug type "%s" is not an instance of BasePlug' % plug_type)
if plug_type.logger != _LOG:
# They put a logger attribute on the class itself, overriding ours.
raise InvalidPlugError(
'Do not override "logger" in your plugs.', plug_type)
# Override the logger so that __init__'s logging goes into the record.
plug_type.logger = plug_logger
try:
plug_instance = plug_type()
finally:
# Now set it back since we'll give the instance a logger in a moment.
plug_type.logger = _LOG
# Set the logger attribute directly (rather than in BasePlug) so we
# don't depend on subclasses' implementation of __init__ to have it
# set.
if plug_instance.logger != _LOG:
raise InvalidPlugError(
'Do not set "self.logger" in __init__ in your plugs', plug_type)
else:
# Now the instance has its own copy of the test logger.
plug_instance.logger = plug_logger
except Exception: # pylint: disable=broad-except
plug_logger.exception('Exception instantiating plug type %s', plug_type)
self.tear_down_plugs()
raise
self.update_plug(plug_type, plug_instance) | [
"def",
"initialize_plugs",
"(",
"self",
",",
"plug_types",
"=",
"None",
")",
":",
"types",
"=",
"plug_types",
"if",
"plug_types",
"is",
"not",
"None",
"else",
"self",
".",
"_plug_types",
"for",
"plug_type",
"in",
"types",
":",
"# Create a logger for this plug. A... | Instantiate required plugs.
Instantiates plug types and saves the instances in self._plugs_by_type for
use in provide_plugs().
Args:
plug_types: Plug types may be specified here rather than passed
into the constructor (this is used primarily for unit testing
phases). | [
"Instantiate",
"required",
"plugs",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/__init__.py#L362-L409 | train | 221,814 |
google/openhtf | openhtf/plugs/__init__.py | PlugManager.update_plug | def update_plug(self, plug_type, plug_value):
"""Update internal data stores with the given plug value for plug type.
Safely tears down the old instance if one was already created, but that's
generally not the case outside unittests. Also, we explicitly pass the
plug_type rather than detecting it from plug_value to allow unittests to
override plugs with Mock instances.
Note this should only be used inside unittests, as this mechanism is not
compatible with RemotePlug support.
"""
self._plug_types.add(plug_type)
if plug_type in self._plugs_by_type:
self._plugs_by_type[plug_type].tearDown()
plug_name = self.get_plug_name(plug_type)
self._plugs_by_type[plug_type] = plug_value
self._plugs_by_name[plug_name] = plug_value
self._plug_descriptors[plug_name] = self._make_plug_descriptor(plug_type) | python | def update_plug(self, plug_type, plug_value):
"""Update internal data stores with the given plug value for plug type.
Safely tears down the old instance if one was already created, but that's
generally not the case outside unittests. Also, we explicitly pass the
plug_type rather than detecting it from plug_value to allow unittests to
override plugs with Mock instances.
Note this should only be used inside unittests, as this mechanism is not
compatible with RemotePlug support.
"""
self._plug_types.add(plug_type)
if plug_type in self._plugs_by_type:
self._plugs_by_type[plug_type].tearDown()
plug_name = self.get_plug_name(plug_type)
self._plugs_by_type[plug_type] = plug_value
self._plugs_by_name[plug_name] = plug_value
self._plug_descriptors[plug_name] = self._make_plug_descriptor(plug_type) | [
"def",
"update_plug",
"(",
"self",
",",
"plug_type",
",",
"plug_value",
")",
":",
"self",
".",
"_plug_types",
".",
"add",
"(",
"plug_type",
")",
"if",
"plug_type",
"in",
"self",
".",
"_plugs_by_type",
":",
"self",
".",
"_plugs_by_type",
"[",
"plug_type",
"... | Update internal data stores with the given plug value for plug type.
Safely tears down the old instance if one was already created, but that's
generally not the case outside unittests. Also, we explicitly pass the
plug_type rather than detecting it from plug_value to allow unittests to
override plugs with Mock instances.
Note this should only be used inside unittests, as this mechanism is not
compatible with RemotePlug support. | [
"Update",
"internal",
"data",
"stores",
"with",
"the",
"given",
"plug",
"value",
"for",
"plug",
"type",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/__init__.py#L425-L442 | train | 221,815 |
google/openhtf | openhtf/plugs/__init__.py | PlugManager.wait_for_plug_update | def wait_for_plug_update(self, plug_name, remote_state, timeout_s):
"""Wait for a change in the state of a frontend-aware plug.
Args:
plug_name: Plug name, e.g. 'openhtf.plugs.user_input.UserInput'.
remote_state: The last observed state.
timeout_s: Number of seconds to wait for an update.
Returns:
An updated state, or None if the timeout runs out.
Raises:
InvalidPlugError: The plug can't be waited on either because it's not in
use or it's not a frontend-aware plug.
"""
plug = self._plugs_by_name.get(plug_name)
if plug is None:
raise InvalidPlugError('Cannot wait on unknown plug "%s".' % plug_name)
if not isinstance(plug, FrontendAwareBasePlug):
raise InvalidPlugError('Cannot wait on a plug %s that is not an subclass '
'of FrontendAwareBasePlug.' % plug_name)
state, update_event = plug.asdict_with_event()
if state != remote_state:
return state
if update_event.wait(timeout_s):
return plug._asdict() | python | def wait_for_plug_update(self, plug_name, remote_state, timeout_s):
"""Wait for a change in the state of a frontend-aware plug.
Args:
plug_name: Plug name, e.g. 'openhtf.plugs.user_input.UserInput'.
remote_state: The last observed state.
timeout_s: Number of seconds to wait for an update.
Returns:
An updated state, or None if the timeout runs out.
Raises:
InvalidPlugError: The plug can't be waited on either because it's not in
use or it's not a frontend-aware plug.
"""
plug = self._plugs_by_name.get(plug_name)
if plug is None:
raise InvalidPlugError('Cannot wait on unknown plug "%s".' % plug_name)
if not isinstance(plug, FrontendAwareBasePlug):
raise InvalidPlugError('Cannot wait on a plug %s that is not an subclass '
'of FrontendAwareBasePlug.' % plug_name)
state, update_event = plug.asdict_with_event()
if state != remote_state:
return state
if update_event.wait(timeout_s):
return plug._asdict() | [
"def",
"wait_for_plug_update",
"(",
"self",
",",
"plug_name",
",",
"remote_state",
",",
"timeout_s",
")",
":",
"plug",
"=",
"self",
".",
"_plugs_by_name",
".",
"get",
"(",
"plug_name",
")",
"if",
"plug",
"is",
"None",
":",
"raise",
"InvalidPlugError",
"(",
... | Wait for a change in the state of a frontend-aware plug.
Args:
plug_name: Plug name, e.g. 'openhtf.plugs.user_input.UserInput'.
remote_state: The last observed state.
timeout_s: Number of seconds to wait for an update.
Returns:
An updated state, or None if the timeout runs out.
Raises:
InvalidPlugError: The plug can't be waited on either because it's not in
use or it's not a frontend-aware plug. | [
"Wait",
"for",
"a",
"change",
"in",
"the",
"state",
"of",
"a",
"frontend",
"-",
"aware",
"plug",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/__init__.py#L477-L506 | train | 221,816 |
google/openhtf | openhtf/plugs/__init__.py | PlugManager.get_frontend_aware_plug_names | def get_frontend_aware_plug_names(self):
"""Returns the names of frontend-aware plugs."""
return [name for name, plug in six.iteritems(self._plugs_by_name)
if isinstance(plug, FrontendAwareBasePlug)] | python | def get_frontend_aware_plug_names(self):
"""Returns the names of frontend-aware plugs."""
return [name for name, plug in six.iteritems(self._plugs_by_name)
if isinstance(plug, FrontendAwareBasePlug)] | [
"def",
"get_frontend_aware_plug_names",
"(",
"self",
")",
":",
"return",
"[",
"name",
"for",
"name",
",",
"plug",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_plugs_by_name",
")",
"if",
"isinstance",
"(",
"plug",
",",
"FrontendAwareBasePlug",
")",
"]"
... | Returns the names of frontend-aware plugs. | [
"Returns",
"the",
"names",
"of",
"frontend",
"-",
"aware",
"plugs",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/__init__.py#L508-L511 | train | 221,817 |
google/openhtf | openhtf/output/servers/station_server.py | _wait_for_any_event | def _wait_for_any_event(events, timeout_s):
"""Wait for any in a list of threading.Event's to be set.
Args:
events: List of threading.Event's.
timeout_s: Max duration in seconds to wait before returning.
Returns:
True if at least one event was set before the timeout expired, else False.
"""
def any_event_set():
return any(event.is_set() for event in events)
result = timeouts.loop_until_timeout_or_true(
timeout_s, any_event_set, sleep_s=_WAIT_FOR_ANY_EVENT_POLL_S)
return result or any_event_set() | python | def _wait_for_any_event(events, timeout_s):
"""Wait for any in a list of threading.Event's to be set.
Args:
events: List of threading.Event's.
timeout_s: Max duration in seconds to wait before returning.
Returns:
True if at least one event was set before the timeout expired, else False.
"""
def any_event_set():
return any(event.is_set() for event in events)
result = timeouts.loop_until_timeout_or_true(
timeout_s, any_event_set, sleep_s=_WAIT_FOR_ANY_EVENT_POLL_S)
return result or any_event_set() | [
"def",
"_wait_for_any_event",
"(",
"events",
",",
"timeout_s",
")",
":",
"def",
"any_event_set",
"(",
")",
":",
"return",
"any",
"(",
"event",
".",
"is_set",
"(",
")",
"for",
"event",
"in",
"events",
")",
"result",
"=",
"timeouts",
".",
"loop_until_timeout... | Wait for any in a list of threading.Event's to be set.
Args:
events: List of threading.Event's.
timeout_s: Max duration in seconds to wait before returning.
Returns:
True if at least one event was set before the timeout expired, else False. | [
"Wait",
"for",
"any",
"in",
"a",
"list",
"of",
"threading",
".",
"Event",
"s",
"to",
"be",
"set",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/servers/station_server.py#L119-L135 | train | 221,818 |
google/openhtf | openhtf/output/servers/station_server.py | StationWatcher._poll_for_update | def _poll_for_update(self):
"""Call the callback with the current test state, then wait for a change."""
test, test_state = _get_executing_test()
if test is None:
time.sleep(_WAIT_FOR_EXECUTING_TEST_POLL_S)
return
state_dict, event = self._to_dict_with_event(test_state)
self._update_callback(state_dict)
plug_manager = test_state.plug_manager
plug_events = [
plug_manager.get_plug_by_class_path(plug_name).asdict_with_event()[1]
for plug_name in plug_manager.get_frontend_aware_plug_names()
]
events = [event] + plug_events
# Wait for the test state or a plug state to change, or for the previously
# executing test to finish.
while not _wait_for_any_event(events, _CHECK_FOR_FINISHED_TEST_POLL_S):
new_test, _ = _get_executing_test()
if test != new_test:
break | python | def _poll_for_update(self):
"""Call the callback with the current test state, then wait for a change."""
test, test_state = _get_executing_test()
if test is None:
time.sleep(_WAIT_FOR_EXECUTING_TEST_POLL_S)
return
state_dict, event = self._to_dict_with_event(test_state)
self._update_callback(state_dict)
plug_manager = test_state.plug_manager
plug_events = [
plug_manager.get_plug_by_class_path(plug_name).asdict_with_event()[1]
for plug_name in plug_manager.get_frontend_aware_plug_names()
]
events = [event] + plug_events
# Wait for the test state or a plug state to change, or for the previously
# executing test to finish.
while not _wait_for_any_event(events, _CHECK_FOR_FINISHED_TEST_POLL_S):
new_test, _ = _get_executing_test()
if test != new_test:
break | [
"def",
"_poll_for_update",
"(",
"self",
")",
":",
"test",
",",
"test_state",
"=",
"_get_executing_test",
"(",
")",
"if",
"test",
"is",
"None",
":",
"time",
".",
"sleep",
"(",
"_WAIT_FOR_EXECUTING_TEST_POLL_S",
")",
"return",
"state_dict",
",",
"event",
"=",
... | Call the callback with the current test state, then wait for a change. | [
"Call",
"the",
"callback",
"with",
"the",
"current",
"test",
"state",
"then",
"wait",
"for",
"a",
"change",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/servers/station_server.py#L176-L199 | train | 221,819 |
google/openhtf | openhtf/output/servers/station_server.py | StationWatcher._to_dict_with_event | def _to_dict_with_event(cls, test_state):
"""Process a test state into the format we want to send to the frontend."""
original_dict, event = test_state.asdict_with_event()
# This line may produce a 'dictionary changed size during iteration' error.
test_state_dict = data.convert_to_base_types(original_dict)
test_state_dict['execution_uid'] = test_state.execution_uid
return test_state_dict, event | python | def _to_dict_with_event(cls, test_state):
"""Process a test state into the format we want to send to the frontend."""
original_dict, event = test_state.asdict_with_event()
# This line may produce a 'dictionary changed size during iteration' error.
test_state_dict = data.convert_to_base_types(original_dict)
test_state_dict['execution_uid'] = test_state.execution_uid
return test_state_dict, event | [
"def",
"_to_dict_with_event",
"(",
"cls",
",",
"test_state",
")",
":",
"original_dict",
",",
"event",
"=",
"test_state",
".",
"asdict_with_event",
"(",
")",
"# This line may produce a 'dictionary changed size during iteration' error.",
"test_state_dict",
"=",
"data",
".",
... | Process a test state into the format we want to send to the frontend. | [
"Process",
"a",
"test",
"state",
"into",
"the",
"format",
"we",
"want",
"to",
"send",
"to",
"the",
"frontend",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/servers/station_server.py#L202-L210 | train | 221,820 |
google/openhtf | openhtf/util/threads.py | _safe_lock_release_py2 | def _safe_lock_release_py2(rlock):
"""Ensure that a threading.RLock is fully released for Python 2.
The RLock release code is:
https://github.com/python/cpython/blob/2.7/Lib/threading.py#L187
The RLock object's release method does not release all of its state if an
exception is raised in the middle of its operation. There are three pieces of
internal state that must be cleaned up:
- owning thread ident, an integer.
- entry count, an integer that counts how many times the current owner has
locked the RLock.
- internal lock, a threading.Lock instance that handles blocking.
Args:
rlock: threading.RLock, lock to fully release.
Yields:
None.
"""
assert isinstance(rlock, threading._RLock)
ident = _thread.get_ident()
expected_count = 0
if rlock._RLock__owner == ident:
expected_count = rlock._RLock__count
try:
yield
except ThreadTerminationError:
# Check if the current thread still owns the lock by checking if we can
# acquire the underlying lock.
if rlock._RLock__block.acquire(0):
# Lock is clean, so unlock and we are done.
rlock._RLock__block.release()
elif rlock._RLock__owner == ident and expected_count > 0:
# The lock is still held up the stack, so make sure the count is accurate.
if rlock._RLock__count != expected_count:
rlock._RLock__count = expected_count
elif rlock._RLock__owner == ident or rlock._RLock__owner is None:
# The internal lock is still acquired, but either this thread or no thread
# owns it, which means it needs to be hard reset.
rlock._RLock__owner = None
rlock._RLock__count = 0
rlock._RLock__block.release()
raise | python | def _safe_lock_release_py2(rlock):
"""Ensure that a threading.RLock is fully released for Python 2.
The RLock release code is:
https://github.com/python/cpython/blob/2.7/Lib/threading.py#L187
The RLock object's release method does not release all of its state if an
exception is raised in the middle of its operation. There are three pieces of
internal state that must be cleaned up:
- owning thread ident, an integer.
- entry count, an integer that counts how many times the current owner has
locked the RLock.
- internal lock, a threading.Lock instance that handles blocking.
Args:
rlock: threading.RLock, lock to fully release.
Yields:
None.
"""
assert isinstance(rlock, threading._RLock)
ident = _thread.get_ident()
expected_count = 0
if rlock._RLock__owner == ident:
expected_count = rlock._RLock__count
try:
yield
except ThreadTerminationError:
# Check if the current thread still owns the lock by checking if we can
# acquire the underlying lock.
if rlock._RLock__block.acquire(0):
# Lock is clean, so unlock and we are done.
rlock._RLock__block.release()
elif rlock._RLock__owner == ident and expected_count > 0:
# The lock is still held up the stack, so make sure the count is accurate.
if rlock._RLock__count != expected_count:
rlock._RLock__count = expected_count
elif rlock._RLock__owner == ident or rlock._RLock__owner is None:
# The internal lock is still acquired, but either this thread or no thread
# owns it, which means it needs to be hard reset.
rlock._RLock__owner = None
rlock._RLock__count = 0
rlock._RLock__block.release()
raise | [
"def",
"_safe_lock_release_py2",
"(",
"rlock",
")",
":",
"assert",
"isinstance",
"(",
"rlock",
",",
"threading",
".",
"_RLock",
")",
"ident",
"=",
"_thread",
".",
"get_ident",
"(",
")",
"expected_count",
"=",
"0",
"if",
"rlock",
".",
"_RLock__owner",
"==",
... | Ensure that a threading.RLock is fully released for Python 2.
The RLock release code is:
https://github.com/python/cpython/blob/2.7/Lib/threading.py#L187
The RLock object's release method does not release all of its state if an
exception is raised in the middle of its operation. There are three pieces of
internal state that must be cleaned up:
- owning thread ident, an integer.
- entry count, an integer that counts how many times the current owner has
locked the RLock.
- internal lock, a threading.Lock instance that handles blocking.
Args:
rlock: threading.RLock, lock to fully release.
Yields:
None. | [
"Ensure",
"that",
"a",
"threading",
".",
"RLock",
"is",
"fully",
"released",
"for",
"Python",
"2",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/threads.py#L54-L97 | train | 221,821 |
google/openhtf | openhtf/util/threads.py | loop | def loop(_=None, force=False): # pylint: disable=invalid-name
"""Causes a function to loop indefinitely."""
if not force:
raise AttributeError(
'threads.loop() is DEPRECATED. If you really like this and want to '
'keep it, file an issue at https://github.com/google/openhtf/issues '
'and use it as @loop(force=True) for now.')
def real_loop(fn):
@functools.wraps(fn)
def _proc(*args, **kwargs):
"""Wrapper to return."""
while True:
fn(*args, **kwargs)
_proc.once = fn # way for tests to invoke the function once
# you may need to pass in "self" since this may be unbound.
return _proc
return real_loop | python | def loop(_=None, force=False): # pylint: disable=invalid-name
"""Causes a function to loop indefinitely."""
if not force:
raise AttributeError(
'threads.loop() is DEPRECATED. If you really like this and want to '
'keep it, file an issue at https://github.com/google/openhtf/issues '
'and use it as @loop(force=True) for now.')
def real_loop(fn):
@functools.wraps(fn)
def _proc(*args, **kwargs):
"""Wrapper to return."""
while True:
fn(*args, **kwargs)
_proc.once = fn # way for tests to invoke the function once
# you may need to pass in "self" since this may be unbound.
return _proc
return real_loop | [
"def",
"loop",
"(",
"_",
"=",
"None",
",",
"force",
"=",
"False",
")",
":",
"# pylint: disable=invalid-name",
"if",
"not",
"force",
":",
"raise",
"AttributeError",
"(",
"'threads.loop() is DEPRECATED. If you really like this and want to '",
"'keep it, file an issue at http... | Causes a function to loop indefinitely. | [
"Causes",
"a",
"function",
"to",
"loop",
"indefinitely",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/threads.py#L101-L118 | train | 221,822 |
google/openhtf | openhtf/util/threads.py | synchronized | def synchronized(func): # pylint: disable=invalid-name
"""Hold self._lock while executing func."""
@functools.wraps(func)
def synchronized_method(self, *args, **kwargs):
"""Wrapper to return."""
if not hasattr(self, '_lock'):
if func.__name__ in type(self).__dict__:
hint = ''
else:
hint = (' Might be missing call to super in %s.__init__?' %
type(self).__name__)
raise RuntimeError('Can\'t synchronize method `%s` of %s without '
'attribute `_lock`.%s' %
(func.__name__, type(self).__name__, hint))
with self._lock: # pylint: disable=protected-access
return func(self, *args, **kwargs)
return synchronized_method | python | def synchronized(func): # pylint: disable=invalid-name
"""Hold self._lock while executing func."""
@functools.wraps(func)
def synchronized_method(self, *args, **kwargs):
"""Wrapper to return."""
if not hasattr(self, '_lock'):
if func.__name__ in type(self).__dict__:
hint = ''
else:
hint = (' Might be missing call to super in %s.__init__?' %
type(self).__name__)
raise RuntimeError('Can\'t synchronize method `%s` of %s without '
'attribute `_lock`.%s' %
(func.__name__, type(self).__name__, hint))
with self._lock: # pylint: disable=protected-access
return func(self, *args, **kwargs)
return synchronized_method | [
"def",
"synchronized",
"(",
"func",
")",
":",
"# pylint: disable=invalid-name",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"synchronized_method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrapper to return.\"\"\"",
... | Hold self._lock while executing func. | [
"Hold",
"self",
".",
"_lock",
"while",
"executing",
"func",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/threads.py#L241-L257 | train | 221,823 |
google/openhtf | openhtf/util/threads.py | KillableThread.kill | def kill(self):
"""Terminates the current thread by raising an error."""
self._killed.set()
if not self.is_alive():
logging.debug('Cannot kill thread that is no longer running.')
return
if not self._is_thread_proc_running():
logging.debug("Thread's _thread_proc function is no longer running, "
'will not kill; letting thread exit gracefully.')
return
self.async_raise(ThreadTerminationError) | python | def kill(self):
"""Terminates the current thread by raising an error."""
self._killed.set()
if not self.is_alive():
logging.debug('Cannot kill thread that is no longer running.')
return
if not self._is_thread_proc_running():
logging.debug("Thread's _thread_proc function is no longer running, "
'will not kill; letting thread exit gracefully.')
return
self.async_raise(ThreadTerminationError) | [
"def",
"kill",
"(",
"self",
")",
":",
"self",
".",
"_killed",
".",
"set",
"(",
")",
"if",
"not",
"self",
".",
"is_alive",
"(",
")",
":",
"logging",
".",
"debug",
"(",
"'Cannot kill thread that is no longer running.'",
")",
"return",
"if",
"not",
"self",
... | Terminates the current thread by raising an error. | [
"Terminates",
"the",
"current",
"thread",
"by",
"raising",
"an",
"error",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/threads.py#L193-L203 | train | 221,824 |
google/openhtf | openhtf/util/threads.py | KillableThread.async_raise | def async_raise(self, exc_type):
"""Raise the exception."""
# Should only be called on a started thread, so raise otherwise.
assert self.ident is not None, 'Only started threads have thread identifier'
# If the thread has died we don't want to raise an exception so log.
if not self.is_alive():
_LOG.debug('Not raising %s because thread %s (%s) is not alive',
exc_type, self.name, self.ident)
return
result = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(self.ident), ctypes.py_object(exc_type))
if result == 0 and self.is_alive():
# Don't raise an exception an error unnecessarily if the thread is dead.
raise ValueError('Thread ID was invalid.', self.ident)
elif result > 1:
# Something bad happened, call with a NULL exception to undo.
ctypes.pythonapi.PyThreadState_SetAsyncExc(self.ident, None)
raise RuntimeError('Error: PyThreadState_SetAsyncExc %s %s (%s) %s' % (
exc_type, self.name, self.ident, result)) | python | def async_raise(self, exc_type):
"""Raise the exception."""
# Should only be called on a started thread, so raise otherwise.
assert self.ident is not None, 'Only started threads have thread identifier'
# If the thread has died we don't want to raise an exception so log.
if not self.is_alive():
_LOG.debug('Not raising %s because thread %s (%s) is not alive',
exc_type, self.name, self.ident)
return
result = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(self.ident), ctypes.py_object(exc_type))
if result == 0 and self.is_alive():
# Don't raise an exception an error unnecessarily if the thread is dead.
raise ValueError('Thread ID was invalid.', self.ident)
elif result > 1:
# Something bad happened, call with a NULL exception to undo.
ctypes.pythonapi.PyThreadState_SetAsyncExc(self.ident, None)
raise RuntimeError('Error: PyThreadState_SetAsyncExc %s %s (%s) %s' % (
exc_type, self.name, self.ident, result)) | [
"def",
"async_raise",
"(",
"self",
",",
"exc_type",
")",
":",
"# Should only be called on a started thread, so raise otherwise.",
"assert",
"self",
".",
"ident",
"is",
"not",
"None",
",",
"'Only started threads have thread identifier'",
"# If the thread has died we don't want to ... | Raise the exception. | [
"Raise",
"the",
"exception",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/threads.py#L205-L225 | train | 221,825 |
google/openhtf | openhtf/util/logs.py | get_record_logger_for | def get_record_logger_for(test_uid):
"""Return the child logger associated with the specified test UID."""
htf_logger = logging.getLogger(RECORD_LOGGER_PREFIX)
record_logger = HtfTestLogger('.'.join(((RECORD_LOGGER_PREFIX, test_uid))))
record_logger.parent = htf_logger
return record_logger | python | def get_record_logger_for(test_uid):
"""Return the child logger associated with the specified test UID."""
htf_logger = logging.getLogger(RECORD_LOGGER_PREFIX)
record_logger = HtfTestLogger('.'.join(((RECORD_LOGGER_PREFIX, test_uid))))
record_logger.parent = htf_logger
return record_logger | [
"def",
"get_record_logger_for",
"(",
"test_uid",
")",
":",
"htf_logger",
"=",
"logging",
".",
"getLogger",
"(",
"RECORD_LOGGER_PREFIX",
")",
"record_logger",
"=",
"HtfTestLogger",
"(",
"'.'",
".",
"join",
"(",
"(",
"(",
"RECORD_LOGGER_PREFIX",
",",
"test_uid",
"... | Return the child logger associated with the specified test UID. | [
"Return",
"the",
"child",
"logger",
"associated",
"with",
"the",
"specified",
"test",
"UID",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/logs.py#L161-L166 | train | 221,826 |
google/openhtf | openhtf/util/logs.py | initialize_record_handler | def initialize_record_handler(test_uid, test_record, notify_update):
"""Initialize the record handler for a test.
For each running test, we attach a record handler to the top-level OpenHTF
logger. The handler will append OpenHTF logs to the test record, while
filtering out logs that are specific to any other test run.
"""
htf_logger = logging.getLogger(LOGGER_PREFIX)
htf_logger.addHandler(RecordHandler(test_uid, test_record, notify_update)) | python | def initialize_record_handler(test_uid, test_record, notify_update):
"""Initialize the record handler for a test.
For each running test, we attach a record handler to the top-level OpenHTF
logger. The handler will append OpenHTF logs to the test record, while
filtering out logs that are specific to any other test run.
"""
htf_logger = logging.getLogger(LOGGER_PREFIX)
htf_logger.addHandler(RecordHandler(test_uid, test_record, notify_update)) | [
"def",
"initialize_record_handler",
"(",
"test_uid",
",",
"test_record",
",",
"notify_update",
")",
":",
"htf_logger",
"=",
"logging",
".",
"getLogger",
"(",
"LOGGER_PREFIX",
")",
"htf_logger",
".",
"addHandler",
"(",
"RecordHandler",
"(",
"test_uid",
",",
"test_r... | Initialize the record handler for a test.
For each running test, we attach a record handler to the top-level OpenHTF
logger. The handler will append OpenHTF logs to the test record, while
filtering out logs that are specific to any other test run. | [
"Initialize",
"the",
"record",
"handler",
"for",
"a",
"test",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/logs.py#L169-L177 | train | 221,827 |
google/openhtf | openhtf/util/logs.py | log_once | def log_once(log_func, msg, *args, **kwargs):
""""Logs a message only once."""
if msg not in _LOG_ONCE_SEEN:
log_func(msg, *args, **kwargs)
# Key on the message, ignoring args. This should fit most use cases.
_LOG_ONCE_SEEN.add(msg) | python | def log_once(log_func, msg, *args, **kwargs):
""""Logs a message only once."""
if msg not in _LOG_ONCE_SEEN:
log_func(msg, *args, **kwargs)
# Key on the message, ignoring args. This should fit most use cases.
_LOG_ONCE_SEEN.add(msg) | [
"def",
"log_once",
"(",
"log_func",
",",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"msg",
"not",
"in",
"_LOG_ONCE_SEEN",
":",
"log_func",
"(",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Key on the message, ignorin... | Logs a message only once. | [
"Logs",
"a",
"message",
"only",
"once",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/logs.py#L188-L193 | train | 221,828 |
google/openhtf | openhtf/util/logs.py | configure_logging | def configure_logging():
"""One-time initialization of loggers. See module docstring for more info."""
# Define the top-level logger.
htf_logger = logging.getLogger(LOGGER_PREFIX)
htf_logger.propagate = False
htf_logger.setLevel(logging.DEBUG)
# By default, don't print any logs to the CLI.
if CLI_LOGGING_VERBOSITY == 0:
htf_logger.addHandler(logging.NullHandler())
return
if CLI_LOGGING_VERBOSITY == 1:
logging_level = logging.INFO
else:
logging_level = logging.DEBUG
# Configure a handler to print to the CLI.
cli_handler = KillableThreadSafeStreamHandler(stream=sys.stdout)
cli_handler.setFormatter(CliFormatter())
cli_handler.setLevel(logging_level)
cli_handler.addFilter(MAC_FILTER)
htf_logger.addHandler(cli_handler)
# Suppress CLI logging if the --quiet flag is used, or while CLI_QUIET is set
# in the console_output module.
cli_handler.addFilter(console_output.CliQuietFilter()) | python | def configure_logging():
"""One-time initialization of loggers. See module docstring for more info."""
# Define the top-level logger.
htf_logger = logging.getLogger(LOGGER_PREFIX)
htf_logger.propagate = False
htf_logger.setLevel(logging.DEBUG)
# By default, don't print any logs to the CLI.
if CLI_LOGGING_VERBOSITY == 0:
htf_logger.addHandler(logging.NullHandler())
return
if CLI_LOGGING_VERBOSITY == 1:
logging_level = logging.INFO
else:
logging_level = logging.DEBUG
# Configure a handler to print to the CLI.
cli_handler = KillableThreadSafeStreamHandler(stream=sys.stdout)
cli_handler.setFormatter(CliFormatter())
cli_handler.setLevel(logging_level)
cli_handler.addFilter(MAC_FILTER)
htf_logger.addHandler(cli_handler)
# Suppress CLI logging if the --quiet flag is used, or while CLI_QUIET is set
# in the console_output module.
cli_handler.addFilter(console_output.CliQuietFilter()) | [
"def",
"configure_logging",
"(",
")",
":",
"# Define the top-level logger.",
"htf_logger",
"=",
"logging",
".",
"getLogger",
"(",
"LOGGER_PREFIX",
")",
"htf_logger",
".",
"propagate",
"=",
"False",
"htf_logger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
... | One-time initialization of loggers. See module docstring for more info. | [
"One",
"-",
"time",
"initialization",
"of",
"loggers",
".",
"See",
"module",
"docstring",
"for",
"more",
"info",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/logs.py#L319-L346 | train | 221,829 |
google/openhtf | openhtf/util/logs.py | RecordHandler.emit | def emit(self, record):
"""Save a logging.LogRecord to our test record.
Logs carry useful metadata such as the logger name and level information.
We capture this in a structured format in the test record to enable
filtering by client applications.
Args:
record: A logging.LogRecord to record.
"""
try:
message = self.format(record)
log_record = LogRecord(
record.levelno, record.name, os.path.basename(record.pathname),
record.lineno, int(record.created * 1000), message,
)
self._test_record.add_log_record(log_record)
self._notify_update()
except Exception: # pylint: disable=broad-except
self.handleError(record) | python | def emit(self, record):
"""Save a logging.LogRecord to our test record.
Logs carry useful metadata such as the logger name and level information.
We capture this in a structured format in the test record to enable
filtering by client applications.
Args:
record: A logging.LogRecord to record.
"""
try:
message = self.format(record)
log_record = LogRecord(
record.levelno, record.name, os.path.basename(record.pathname),
record.lineno, int(record.created * 1000), message,
)
self._test_record.add_log_record(log_record)
self._notify_update()
except Exception: # pylint: disable=broad-except
self.handleError(record) | [
"def",
"emit",
"(",
"self",
",",
"record",
")",
":",
"try",
":",
"message",
"=",
"self",
".",
"format",
"(",
"record",
")",
"log_record",
"=",
"LogRecord",
"(",
"record",
".",
"levelno",
",",
"record",
".",
"name",
",",
"os",
".",
"path",
".",
"bas... | Save a logging.LogRecord to our test record.
Logs carry useful metadata such as the logger name and level information.
We capture this in a structured format in the test record to enable
filtering by client applications.
Args:
record: A logging.LogRecord to record. | [
"Save",
"a",
"logging",
".",
"LogRecord",
"to",
"our",
"test",
"record",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/logs.py#L269-L288 | train | 221,830 |
google/openhtf | openhtf/util/logs.py | CliFormatter.format | def format(self, record):
"""Format the record as tersely as possible but preserve info."""
super(CliFormatter, self).format(record)
localized_time = datetime.datetime.fromtimestamp(record.created)
terse_time = localized_time.strftime(u'%H:%M:%S')
terse_level = record.levelname[0]
terse_name = record.name.split('.')[-1]
match = RECORD_LOGGER_RE.match(record.name)
if match:
# Figure out which OpenHTF subsystem the record came from.
subsys_match = SUBSYSTEM_LOGGER_RE.match(record.name)
if subsys_match:
terse_name = '<{subsys}: {id}>'.format(
subsys=subsys_match.group('subsys'),
id=subsys_match.group('id'))
else:
# Fall back to using the last five characters of the test UUID.
terse_name = '<test %s>' % match.group('test_uid')[-5:]
return '{lvl} {time} {logger} - {msg}'.format(lvl=terse_level,
time=terse_time,
logger=terse_name,
msg=record.message) | python | def format(self, record):
"""Format the record as tersely as possible but preserve info."""
super(CliFormatter, self).format(record)
localized_time = datetime.datetime.fromtimestamp(record.created)
terse_time = localized_time.strftime(u'%H:%M:%S')
terse_level = record.levelname[0]
terse_name = record.name.split('.')[-1]
match = RECORD_LOGGER_RE.match(record.name)
if match:
# Figure out which OpenHTF subsystem the record came from.
subsys_match = SUBSYSTEM_LOGGER_RE.match(record.name)
if subsys_match:
terse_name = '<{subsys}: {id}>'.format(
subsys=subsys_match.group('subsys'),
id=subsys_match.group('id'))
else:
# Fall back to using the last five characters of the test UUID.
terse_name = '<test %s>' % match.group('test_uid')[-5:]
return '{lvl} {time} {logger} - {msg}'.format(lvl=terse_level,
time=terse_time,
logger=terse_name,
msg=record.message) | [
"def",
"format",
"(",
"self",
",",
"record",
")",
":",
"super",
"(",
"CliFormatter",
",",
"self",
")",
".",
"format",
"(",
"record",
")",
"localized_time",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"record",
".",
"created",
")",
"terse... | Format the record as tersely as possible but preserve info. | [
"Format",
"the",
"record",
"as",
"tersely",
"as",
"possible",
"but",
"preserve",
"info",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/logs.py#L294-L315 | train | 221,831 |
google/openhtf | openhtf/output/callbacks/mfg_inspector.py | _send_mfg_inspector_request | def _send_mfg_inspector_request(envelope_data, credentials, destination_url):
"""Send upload http request. Intended to be run in retry loop."""
logging.info('Uploading result...')
http = httplib2.Http()
if credentials.access_token_expired:
credentials.refresh(http)
credentials.authorize(http)
resp, content = http.request(destination_url, 'POST', envelope_data)
try:
result = json.loads(content)
except Exception:
logging.debug('Upload failed with response %s: %s', resp, content)
raise UploadFailedError(resp, content)
if resp.status != 200:
logging.debug('Upload failed: %s', result)
raise UploadFailedError(result['error'], result)
return result | python | def _send_mfg_inspector_request(envelope_data, credentials, destination_url):
"""Send upload http request. Intended to be run in retry loop."""
logging.info('Uploading result...')
http = httplib2.Http()
if credentials.access_token_expired:
credentials.refresh(http)
credentials.authorize(http)
resp, content = http.request(destination_url, 'POST', envelope_data)
try:
result = json.loads(content)
except Exception:
logging.debug('Upload failed with response %s: %s', resp, content)
raise UploadFailedError(resp, content)
if resp.status != 200:
logging.debug('Upload failed: %s', result)
raise UploadFailedError(result['error'], result)
return result | [
"def",
"_send_mfg_inspector_request",
"(",
"envelope_data",
",",
"credentials",
",",
"destination_url",
")",
":",
"logging",
".",
"info",
"(",
"'Uploading result...'",
")",
"http",
"=",
"httplib2",
".",
"Http",
"(",
")",
"if",
"credentials",
".",
"access_token_exp... | Send upload http request. Intended to be run in retry loop. | [
"Send",
"upload",
"http",
"request",
".",
"Intended",
"to",
"be",
"run",
"in",
"retry",
"loop",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/callbacks/mfg_inspector.py#L27-L48 | train | 221,832 |
google/openhtf | openhtf/output/callbacks/mfg_inspector.py | send_mfg_inspector_data | def send_mfg_inspector_data(inspector_proto, credentials, destination_url):
"""Upload MfgEvent to steam_engine."""
envelope = guzzle_pb2.TestRunEnvelope()
envelope.payload = zlib.compress(inspector_proto.SerializeToString())
envelope.payload_type = guzzle_pb2.COMPRESSED_MFG_EVENT
envelope_data = envelope.SerializeToString()
for _ in xrange(5):
try:
result = _send_mfg_inspector_request(
envelope_data, credentials, destination_url)
return result
except UploadFailedError:
time.sleep(1)
logging.critical(
'Could not upload to mfg-inspector after 5 attempts. Giving up.')
return {} | python | def send_mfg_inspector_data(inspector_proto, credentials, destination_url):
"""Upload MfgEvent to steam_engine."""
envelope = guzzle_pb2.TestRunEnvelope()
envelope.payload = zlib.compress(inspector_proto.SerializeToString())
envelope.payload_type = guzzle_pb2.COMPRESSED_MFG_EVENT
envelope_data = envelope.SerializeToString()
for _ in xrange(5):
try:
result = _send_mfg_inspector_request(
envelope_data, credentials, destination_url)
return result
except UploadFailedError:
time.sleep(1)
logging.critical(
'Could not upload to mfg-inspector after 5 attempts. Giving up.')
return {} | [
"def",
"send_mfg_inspector_data",
"(",
"inspector_proto",
",",
"credentials",
",",
"destination_url",
")",
":",
"envelope",
"=",
"guzzle_pb2",
".",
"TestRunEnvelope",
"(",
")",
"envelope",
".",
"payload",
"=",
"zlib",
".",
"compress",
"(",
"inspector_proto",
".",
... | Upload MfgEvent to steam_engine. | [
"Upload",
"MfgEvent",
"to",
"steam_engine",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/callbacks/mfg_inspector.py#L51-L69 | train | 221,833 |
google/openhtf | openhtf/output/callbacks/mfg_inspector.py | MfgInspector._convert | def _convert(self, test_record_obj):
"""Convert and cache a test record to a mfg-inspector proto."""
if self._cached_proto is None:
self._cached_proto = self._converter(test_record_obj)
return self._cached_proto | python | def _convert(self, test_record_obj):
"""Convert and cache a test record to a mfg-inspector proto."""
if self._cached_proto is None:
self._cached_proto = self._converter(test_record_obj)
return self._cached_proto | [
"def",
"_convert",
"(",
"self",
",",
"test_record_obj",
")",
":",
"if",
"self",
".",
"_cached_proto",
"is",
"None",
":",
"self",
".",
"_cached_proto",
"=",
"self",
".",
"_converter",
"(",
"test_record_obj",
")",
"return",
"self",
".",
"_cached_proto"
] | Convert and cache a test record to a mfg-inspector proto. | [
"Convert",
"and",
"cache",
"a",
"test",
"record",
"to",
"a",
"mfg",
"-",
"inspector",
"proto",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/callbacks/mfg_inspector.py#L174-L180 | train | 221,834 |
google/openhtf | openhtf/output/callbacks/mfg_inspector.py | MfgInspector.save_to_disk | def save_to_disk(self, filename_pattern=None):
"""Returns a callback to convert test record to proto and save to disk."""
if not self._converter:
raise RuntimeError(
'Must set _converter on subclass or via set_converter before calling '
'save_to_disk.')
pattern = filename_pattern or self._default_filename_pattern
if not pattern:
raise RuntimeError(
'Must specify provide a filename_pattern or set a '
'_default_filename_pattern on subclass.')
def save_to_disk_callback(test_record_obj):
proto = self._convert(test_record_obj)
output_to_file = callbacks.OutputToFile(pattern)
with output_to_file.open_output_file(test_record_obj) as outfile:
outfile.write(proto.SerializeToString())
return save_to_disk_callback | python | def save_to_disk(self, filename_pattern=None):
"""Returns a callback to convert test record to proto and save to disk."""
if not self._converter:
raise RuntimeError(
'Must set _converter on subclass or via set_converter before calling '
'save_to_disk.')
pattern = filename_pattern or self._default_filename_pattern
if not pattern:
raise RuntimeError(
'Must specify provide a filename_pattern or set a '
'_default_filename_pattern on subclass.')
def save_to_disk_callback(test_record_obj):
proto = self._convert(test_record_obj)
output_to_file = callbacks.OutputToFile(pattern)
with output_to_file.open_output_file(test_record_obj) as outfile:
outfile.write(proto.SerializeToString())
return save_to_disk_callback | [
"def",
"save_to_disk",
"(",
"self",
",",
"filename_pattern",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_converter",
":",
"raise",
"RuntimeError",
"(",
"'Must set _converter on subclass or via set_converter before calling '",
"'save_to_disk.'",
")",
"pattern",
"=... | Returns a callback to convert test record to proto and save to disk. | [
"Returns",
"a",
"callback",
"to",
"convert",
"test",
"record",
"to",
"proto",
"and",
"save",
"to",
"disk",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/callbacks/mfg_inspector.py#L182-L202 | train | 221,835 |
google/openhtf | openhtf/output/callbacks/mfg_inspector.py | MfgInspector.upload | def upload(self):
"""Returns a callback to convert a test record to a proto and upload."""
if not self._converter:
raise RuntimeError(
'Must set _converter on subclass or via set_converter before calling '
'upload.')
if not self.credentials:
raise RuntimeError('Must provide credentials to use upload callback.')
def upload_callback(test_record_obj):
proto = self._convert(test_record_obj)
self.upload_result = send_mfg_inspector_data(
proto, self.credentials, self.destination_url)
return upload_callback | python | def upload(self):
"""Returns a callback to convert a test record to a proto and upload."""
if not self._converter:
raise RuntimeError(
'Must set _converter on subclass or via set_converter before calling '
'upload.')
if not self.credentials:
raise RuntimeError('Must provide credentials to use upload callback.')
def upload_callback(test_record_obj):
proto = self._convert(test_record_obj)
self.upload_result = send_mfg_inspector_data(
proto, self.credentials, self.destination_url)
return upload_callback | [
"def",
"upload",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_converter",
":",
"raise",
"RuntimeError",
"(",
"'Must set _converter on subclass or via set_converter before calling '",
"'upload.'",
")",
"if",
"not",
"self",
".",
"credentials",
":",
"raise",
"Run... | Returns a callback to convert a test record to a proto and upload. | [
"Returns",
"a",
"callback",
"to",
"convert",
"a",
"test",
"record",
"to",
"a",
"proto",
"and",
"upload",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/callbacks/mfg_inspector.py#L204-L219 | train | 221,836 |
google/openhtf | openhtf/plugs/usb/shell_service.py | AsyncCommandHandle._writer_thread_proc | def _writer_thread_proc(self, is_raw):
"""Write as long as the stream is not closed."""
# If we're not in raw mode, do line-buffered reads to play nicer with
# potential interactive uses, max of MAX_ADB_DATA, since anything we write
# to the stream will get packetized to that size anyway.
#
# Loop until our stream gets closed, which will cause one of these
# operations to raise. Since we're in a separate thread, it'll just get
# ignored, which is what we want.
reader = self.stdin.read if is_raw else self.stdin.readline
while not self.stream.is_closed():
self.stream.write(reader(adb_protocol.MAX_ADB_DATA)) | python | def _writer_thread_proc(self, is_raw):
"""Write as long as the stream is not closed."""
# If we're not in raw mode, do line-buffered reads to play nicer with
# potential interactive uses, max of MAX_ADB_DATA, since anything we write
# to the stream will get packetized to that size anyway.
#
# Loop until our stream gets closed, which will cause one of these
# operations to raise. Since we're in a separate thread, it'll just get
# ignored, which is what we want.
reader = self.stdin.read if is_raw else self.stdin.readline
while not self.stream.is_closed():
self.stream.write(reader(adb_protocol.MAX_ADB_DATA)) | [
"def",
"_writer_thread_proc",
"(",
"self",
",",
"is_raw",
")",
":",
"# If we're not in raw mode, do line-buffered reads to play nicer with",
"# potential interactive uses, max of MAX_ADB_DATA, since anything we write",
"# to the stream will get packetized to that size anyway.",
"#",
"# Loop ... | Write as long as the stream is not closed. | [
"Write",
"as",
"long",
"as",
"the",
"stream",
"is",
"not",
"closed",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/shell_service.py#L132-L143 | train | 221,837 |
google/openhtf | openhtf/plugs/usb/shell_service.py | AsyncCommandHandle._reader_thread_proc | def _reader_thread_proc(self, timeout):
"""Read until the stream is closed."""
for data in self.stream.read_until_close(timeout_ms=timeout):
if self.stdout is not None:
self.stdout.write(data) | python | def _reader_thread_proc(self, timeout):
"""Read until the stream is closed."""
for data in self.stream.read_until_close(timeout_ms=timeout):
if self.stdout is not None:
self.stdout.write(data) | [
"def",
"_reader_thread_proc",
"(",
"self",
",",
"timeout",
")",
":",
"for",
"data",
"in",
"self",
".",
"stream",
".",
"read_until_close",
"(",
"timeout_ms",
"=",
"timeout",
")",
":",
"if",
"self",
".",
"stdout",
"is",
"not",
"None",
":",
"self",
".",
"... | Read until the stream is closed. | [
"Read",
"until",
"the",
"stream",
"is",
"closed",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/shell_service.py#L145-L149 | train | 221,838 |
google/openhtf | openhtf/plugs/usb/shell_service.py | AsyncCommandHandle.wait | def wait(self, timeout_ms=None):
"""Block until this command has completed.
Args:
timeout_ms: Timeout, in milliseconds, to wait.
Returns:
Output of the command if it complete and self.stdout is a StringIO
object or was passed in as None. Returns True if the command completed but
stdout was provided (and was not a StringIO object). Returns None if the
timeout expired before the command completed. Be careful to check the
return value explicitly for None, as the output may be ''.
"""
closed = timeouts.loop_until_timeout_or_true(
timeouts.PolledTimeout.from_millis(timeout_ms),
self.stream.is_closed, .1)
if closed:
if hasattr(self.stdout, 'getvalue'):
return self.stdout.getvalue()
return True
return None | python | def wait(self, timeout_ms=None):
"""Block until this command has completed.
Args:
timeout_ms: Timeout, in milliseconds, to wait.
Returns:
Output of the command if it complete and self.stdout is a StringIO
object or was passed in as None. Returns True if the command completed but
stdout was provided (and was not a StringIO object). Returns None if the
timeout expired before the command completed. Be careful to check the
return value explicitly for None, as the output may be ''.
"""
closed = timeouts.loop_until_timeout_or_true(
timeouts.PolledTimeout.from_millis(timeout_ms),
self.stream.is_closed, .1)
if closed:
if hasattr(self.stdout, 'getvalue'):
return self.stdout.getvalue()
return True
return None | [
"def",
"wait",
"(",
"self",
",",
"timeout_ms",
"=",
"None",
")",
":",
"closed",
"=",
"timeouts",
".",
"loop_until_timeout_or_true",
"(",
"timeouts",
".",
"PolledTimeout",
".",
"from_millis",
"(",
"timeout_ms",
")",
",",
"self",
".",
"stream",
".",
"is_closed... | Block until this command has completed.
Args:
timeout_ms: Timeout, in milliseconds, to wait.
Returns:
Output of the command if it complete and self.stdout is a StringIO
object or was passed in as None. Returns True if the command completed but
stdout was provided (and was not a StringIO object). Returns None if the
timeout expired before the command completed. Be careful to check the
return value explicitly for None, as the output may be ''. | [
"Block",
"until",
"this",
"command",
"has",
"completed",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/shell_service.py#L169-L189 | train | 221,839 |
google/openhtf | openhtf/plugs/usb/shell_service.py | ShellService.command | def command(self, command, raw=False, timeout_ms=None):
"""Run the given command and return the output."""
return ''.join(self.streaming_command(command, raw, timeout_ms)) | python | def command(self, command, raw=False, timeout_ms=None):
"""Run the given command and return the output."""
return ''.join(self.streaming_command(command, raw, timeout_ms)) | [
"def",
"command",
"(",
"self",
",",
"command",
",",
"raw",
"=",
"False",
",",
"timeout_ms",
"=",
"None",
")",
":",
"return",
"''",
".",
"join",
"(",
"self",
".",
"streaming_command",
"(",
"command",
",",
"raw",
",",
"timeout_ms",
")",
")"
] | Run the given command and return the output. | [
"Run",
"the",
"given",
"command",
"and",
"return",
"the",
"output",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/shell_service.py#L223-L225 | train | 221,840 |
google/openhtf | openhtf/plugs/usb/shell_service.py | ShellService.streaming_command | def streaming_command(self, command, raw=False, timeout_ms=None):
"""Run the given command and yield the output as we receive it."""
if raw:
command = self._to_raw_command(command)
return self.adb_connection.streaming_command('shell', command, timeout_ms) | python | def streaming_command(self, command, raw=False, timeout_ms=None):
"""Run the given command and yield the output as we receive it."""
if raw:
command = self._to_raw_command(command)
return self.adb_connection.streaming_command('shell', command, timeout_ms) | [
"def",
"streaming_command",
"(",
"self",
",",
"command",
",",
"raw",
"=",
"False",
",",
"timeout_ms",
"=",
"None",
")",
":",
"if",
"raw",
":",
"command",
"=",
"self",
".",
"_to_raw_command",
"(",
"command",
")",
"return",
"self",
".",
"adb_connection",
"... | Run the given command and yield the output as we receive it. | [
"Run",
"the",
"given",
"command",
"and",
"yield",
"the",
"output",
"as",
"we",
"receive",
"it",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/shell_service.py#L227-L231 | train | 221,841 |
google/openhtf | openhtf/plugs/usb/shell_service.py | ShellService.async_command | def async_command(self, command, stdin=None, stdout=None, raw=False,
timeout_ms=None):
"""Run the given command on the device asynchronously.
Input will be read from stdin, output written to stdout. ADB doesn't
distinguish between stdout and stdin on the device, so they get interleaved
into stdout here. stdin and stdout should be file-like objects, so you
could use sys.stdin and sys.stdout to emulate the 'adb shell' commandline.
Args:
command: The command to run, will be run with /bin/sh -c 'command' on
the device.
stdin: File-like object to read from to pipe to the command's stdin. Can
be None, in which case nothing will be written to the command's stdin.
stdout: File-like object to write the command's output to. Can be None,
in which case the command's output will be buffered internally, and can
be access via the return value of wait().
raw: If True, run the command as per RawCommand (see above).
timeout_ms: Timeout for the command, in milliseconds.
Returns:
An AsyncCommandHandle instance that can be used to send/receive data to
and from the command or wait on the command to finish.
Raises:
AdbStreamUnavailableError: If the remote devices doesn't support the
shell: service.
"""
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
if raw:
command = self._to_raw_command(command)
stream = self.adb_connection.open_stream('shell:%s' % command, timeout)
if not stream:
raise usb_exceptions.AdbStreamUnavailableError(
'%s does not support service: shell', self)
if raw and stdin is not None:
# Short delay to make sure the ioctl to set raw mode happens before we do
# any writes to the stream, if we don't do this bad things happen...
time.sleep(.1)
return AsyncCommandHandle(stream, stdin, stdout, timeout, raw) | python | def async_command(self, command, stdin=None, stdout=None, raw=False,
timeout_ms=None):
"""Run the given command on the device asynchronously.
Input will be read from stdin, output written to stdout. ADB doesn't
distinguish between stdout and stdin on the device, so they get interleaved
into stdout here. stdin and stdout should be file-like objects, so you
could use sys.stdin and sys.stdout to emulate the 'adb shell' commandline.
Args:
command: The command to run, will be run with /bin/sh -c 'command' on
the device.
stdin: File-like object to read from to pipe to the command's stdin. Can
be None, in which case nothing will be written to the command's stdin.
stdout: File-like object to write the command's output to. Can be None,
in which case the command's output will be buffered internally, and can
be access via the return value of wait().
raw: If True, run the command as per RawCommand (see above).
timeout_ms: Timeout for the command, in milliseconds.
Returns:
An AsyncCommandHandle instance that can be used to send/receive data to
and from the command or wait on the command to finish.
Raises:
AdbStreamUnavailableError: If the remote devices doesn't support the
shell: service.
"""
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
if raw:
command = self._to_raw_command(command)
stream = self.adb_connection.open_stream('shell:%s' % command, timeout)
if not stream:
raise usb_exceptions.AdbStreamUnavailableError(
'%s does not support service: shell', self)
if raw and stdin is not None:
# Short delay to make sure the ioctl to set raw mode happens before we do
# any writes to the stream, if we don't do this bad things happen...
time.sleep(.1)
return AsyncCommandHandle(stream, stdin, stdout, timeout, raw) | [
"def",
"async_command",
"(",
"self",
",",
"command",
",",
"stdin",
"=",
"None",
",",
"stdout",
"=",
"None",
",",
"raw",
"=",
"False",
",",
"timeout_ms",
"=",
"None",
")",
":",
"timeout",
"=",
"timeouts",
".",
"PolledTimeout",
".",
"from_millis",
"(",
"... | Run the given command on the device asynchronously.
Input will be read from stdin, output written to stdout. ADB doesn't
distinguish between stdout and stdin on the device, so they get interleaved
into stdout here. stdin and stdout should be file-like objects, so you
could use sys.stdin and sys.stdout to emulate the 'adb shell' commandline.
Args:
command: The command to run, will be run with /bin/sh -c 'command' on
the device.
stdin: File-like object to read from to pipe to the command's stdin. Can
be None, in which case nothing will be written to the command's stdin.
stdout: File-like object to write the command's output to. Can be None,
in which case the command's output will be buffered internally, and can
be access via the return value of wait().
raw: If True, run the command as per RawCommand (see above).
timeout_ms: Timeout for the command, in milliseconds.
Returns:
An AsyncCommandHandle instance that can be used to send/receive data to
and from the command or wait on the command to finish.
Raises:
AdbStreamUnavailableError: If the remote devices doesn't support the
shell: service. | [
"Run",
"the",
"given",
"command",
"on",
"the",
"device",
"asynchronously",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/shell_service.py#L234-L273 | train | 221,842 |
google/openhtf | openhtf/util/conf.py | Configuration.load_flag_values | def load_flag_values(self, flags=None):
"""Load flag values given from command line flags.
Args:
flags: An argparse Namespace containing the command line flags.
"""
if flags is None:
flags = self._flags
for keyval in flags.config_value:
k, v = keyval.split('=', 1)
v = self._modules['yaml'].load(v) if isinstance(v, str) else v
# Force any command line keys and values that are bytes to unicode.
k = k.decode() if isinstance(k, bytes) else k
v = v.decode() if isinstance(v, bytes) else v
self._flag_values.setdefault(k, v) | python | def load_flag_values(self, flags=None):
"""Load flag values given from command line flags.
Args:
flags: An argparse Namespace containing the command line flags.
"""
if flags is None:
flags = self._flags
for keyval in flags.config_value:
k, v = keyval.split('=', 1)
v = self._modules['yaml'].load(v) if isinstance(v, str) else v
# Force any command line keys and values that are bytes to unicode.
k = k.decode() if isinstance(k, bytes) else k
v = v.decode() if isinstance(v, bytes) else v
self._flag_values.setdefault(k, v) | [
"def",
"load_flag_values",
"(",
"self",
",",
"flags",
"=",
"None",
")",
":",
"if",
"flags",
"is",
"None",
":",
"flags",
"=",
"self",
".",
"_flags",
"for",
"keyval",
"in",
"flags",
".",
"config_value",
":",
"k",
",",
"v",
"=",
"keyval",
".",
"split",
... | Load flag values given from command line flags.
Args:
flags: An argparse Namespace containing the command line flags. | [
"Load",
"flag",
"values",
"given",
"from",
"command",
"line",
"flags",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/conf.py#L255-L271 | train | 221,843 |
google/openhtf | openhtf/util/conf.py | Configuration.declare | def declare(self, name, description=None, **kwargs):
"""Declare a configuration key with the given name.
Args:
name: Configuration key to declare, must not have been already declared.
description: If provided, use this as the description for this key.
**kwargs: Other kwargs to pass to the Declaration, only default_value
is currently supported.
"""
if not self._is_valid_key(name):
raise self.InvalidKeyError(
'Invalid key name, must begin with a lowercase letter', name)
if name in self._declarations:
raise self.KeyAlreadyDeclaredError(
'Configuration key already declared', name)
self._declarations[name] = self.Declaration(
name, description=description, **kwargs) | python | def declare(self, name, description=None, **kwargs):
"""Declare a configuration key with the given name.
Args:
name: Configuration key to declare, must not have been already declared.
description: If provided, use this as the description for this key.
**kwargs: Other kwargs to pass to the Declaration, only default_value
is currently supported.
"""
if not self._is_valid_key(name):
raise self.InvalidKeyError(
'Invalid key name, must begin with a lowercase letter', name)
if name in self._declarations:
raise self.KeyAlreadyDeclaredError(
'Configuration key already declared', name)
self._declarations[name] = self.Declaration(
name, description=description, **kwargs) | [
"def",
"declare",
"(",
"self",
",",
"name",
",",
"description",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"_is_valid_key",
"(",
"name",
")",
":",
"raise",
"self",
".",
"InvalidKeyError",
"(",
"'Invalid key name, must begin w... | Declare a configuration key with the given name.
Args:
name: Configuration key to declare, must not have been already declared.
description: If provided, use this as the description for this key.
**kwargs: Other kwargs to pass to the Declaration, only default_value
is currently supported. | [
"Declare",
"a",
"configuration",
"key",
"with",
"the",
"given",
"name",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/conf.py#L337-L353 | train | 221,844 |
google/openhtf | openhtf/util/conf.py | Configuration.reset | def reset(self):
"""Reset the loaded state of the configuration to what it was at import.
Note that this does *not* reset values set by commandline flags or loaded
from --config-file (in fact, any values loaded from --config-file that have
been overridden are reset to their value from --config-file).
"""
# Populate loaded_values with values from --config-file, if it was given.
self._loaded_values = {}
if self._flags.config_file is not None:
self.load_from_file(self._flags.config_file, _allow_undeclared=True) | python | def reset(self):
"""Reset the loaded state of the configuration to what it was at import.
Note that this does *not* reset values set by commandline flags or loaded
from --config-file (in fact, any values loaded from --config-file that have
been overridden are reset to their value from --config-file).
"""
# Populate loaded_values with values from --config-file, if it was given.
self._loaded_values = {}
if self._flags.config_file is not None:
self.load_from_file(self._flags.config_file, _allow_undeclared=True) | [
"def",
"reset",
"(",
"self",
")",
":",
"# Populate loaded_values with values from --config-file, if it was given.",
"self",
".",
"_loaded_values",
"=",
"{",
"}",
"if",
"self",
".",
"_flags",
".",
"config_file",
"is",
"not",
"None",
":",
"self",
".",
"load_from_file"... | Reset the loaded state of the configuration to what it was at import.
Note that this does *not* reset values set by commandline flags or loaded
from --config-file (in fact, any values loaded from --config-file that have
been overridden are reset to their value from --config-file). | [
"Reset",
"the",
"loaded",
"state",
"of",
"the",
"configuration",
"to",
"what",
"it",
"was",
"at",
"import",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/conf.py#L356-L366 | train | 221,845 |
google/openhtf | openhtf/util/conf.py | Configuration.load_from_file | def load_from_file(self, yamlfile, _override=True, _allow_undeclared=False):
"""Loads the configuration from a file.
Parsed contents must be a single dict mapping config key to value.
Args:
yamlfile: The opened file object to load configuration from.
See load_from_dict() for other args' descriptions.
Raises:
ConfigurationInvalidError: If configuration file can't be read, or can't
be parsed as either YAML (or JSON, which is a subset of YAML).
"""
self._logger.info('Loading configuration from file: %s', yamlfile)
try:
parsed_yaml = self._modules['yaml'].safe_load(yamlfile.read())
except self._modules['yaml'].YAMLError:
self._logger.exception('Problem parsing YAML')
raise self.ConfigurationInvalidError(
'Failed to load from %s as YAML' % yamlfile)
if not isinstance(parsed_yaml, dict):
# Parsed YAML, but it's not a dict.
raise self.ConfigurationInvalidError(
'YAML parsed, but wrong type, should be dict', parsed_yaml)
self._logger.debug('Configuration loaded from file: %s', parsed_yaml)
self.load_from_dict(
parsed_yaml, _override=_override, _allow_undeclared=_allow_undeclared) | python | def load_from_file(self, yamlfile, _override=True, _allow_undeclared=False):
"""Loads the configuration from a file.
Parsed contents must be a single dict mapping config key to value.
Args:
yamlfile: The opened file object to load configuration from.
See load_from_dict() for other args' descriptions.
Raises:
ConfigurationInvalidError: If configuration file can't be read, or can't
be parsed as either YAML (or JSON, which is a subset of YAML).
"""
self._logger.info('Loading configuration from file: %s', yamlfile)
try:
parsed_yaml = self._modules['yaml'].safe_load(yamlfile.read())
except self._modules['yaml'].YAMLError:
self._logger.exception('Problem parsing YAML')
raise self.ConfigurationInvalidError(
'Failed to load from %s as YAML' % yamlfile)
if not isinstance(parsed_yaml, dict):
# Parsed YAML, but it's not a dict.
raise self.ConfigurationInvalidError(
'YAML parsed, but wrong type, should be dict', parsed_yaml)
self._logger.debug('Configuration loaded from file: %s', parsed_yaml)
self.load_from_dict(
parsed_yaml, _override=_override, _allow_undeclared=_allow_undeclared) | [
"def",
"load_from_file",
"(",
"self",
",",
"yamlfile",
",",
"_override",
"=",
"True",
",",
"_allow_undeclared",
"=",
"False",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"'Loading configuration from file: %s'",
",",
"yamlfile",
")",
"try",
":",
"parsed... | Loads the configuration from a file.
Parsed contents must be a single dict mapping config key to value.
Args:
yamlfile: The opened file object to load configuration from.
See load_from_dict() for other args' descriptions.
Raises:
ConfigurationInvalidError: If configuration file can't be read, or can't
be parsed as either YAML (or JSON, which is a subset of YAML). | [
"Loads",
"the",
"configuration",
"from",
"a",
"file",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/conf.py#L368-L397 | train | 221,846 |
google/openhtf | openhtf/util/conf.py | Configuration.load_from_dict | def load_from_dict(self, dictionary, _override=True, _allow_undeclared=False):
"""Loads the config with values from a dictionary instead of a file.
This is meant for testing and bin purposes and shouldn't be used in most
applications.
Args:
dictionary: The dictionary containing config keys/values to update.
_override: If True, new values will override previous values.
_allow_undeclared: If True, silently load undeclared keys, otherwise
warn and ignore the value. Typically used for loading config
files before declarations have been evaluated.
"""
undeclared_keys = []
for key, value in self._modules['six'].iteritems(dictionary):
# Warn in this case. We raise if you try to access a config key that
# hasn't been declared, but we don't raise here so that you can use
# configuration files that are supersets of required configuration for
# any particular test station.
if key not in self._declarations and not _allow_undeclared:
undeclared_keys.append(key)
continue
if key in self._loaded_values:
if _override:
self._logger.info(
'Overriding previously loaded value for %s (%s) with value: %s',
key, self._loaded_values[key], value)
else:
self._logger.info(
'Ignoring new value (%s), keeping previous value for %s: %s',
value, key, self._loaded_values[key])
continue
# Force any keys and values that are bytes to unicode.
key = key.decode() if isinstance(key, bytes) else key
value = value.decode() if isinstance(value, bytes) else value
self._loaded_values[key] = value
if undeclared_keys:
self._logger.warning('Ignoring undeclared configuration keys: %s',
undeclared_keys) | python | def load_from_dict(self, dictionary, _override=True, _allow_undeclared=False):
"""Loads the config with values from a dictionary instead of a file.
This is meant for testing and bin purposes and shouldn't be used in most
applications.
Args:
dictionary: The dictionary containing config keys/values to update.
_override: If True, new values will override previous values.
_allow_undeclared: If True, silently load undeclared keys, otherwise
warn and ignore the value. Typically used for loading config
files before declarations have been evaluated.
"""
undeclared_keys = []
for key, value in self._modules['six'].iteritems(dictionary):
# Warn in this case. We raise if you try to access a config key that
# hasn't been declared, but we don't raise here so that you can use
# configuration files that are supersets of required configuration for
# any particular test station.
if key not in self._declarations and not _allow_undeclared:
undeclared_keys.append(key)
continue
if key in self._loaded_values:
if _override:
self._logger.info(
'Overriding previously loaded value for %s (%s) with value: %s',
key, self._loaded_values[key], value)
else:
self._logger.info(
'Ignoring new value (%s), keeping previous value for %s: %s',
value, key, self._loaded_values[key])
continue
# Force any keys and values that are bytes to unicode.
key = key.decode() if isinstance(key, bytes) else key
value = value.decode() if isinstance(value, bytes) else value
self._loaded_values[key] = value
if undeclared_keys:
self._logger.warning('Ignoring undeclared configuration keys: %s',
undeclared_keys) | [
"def",
"load_from_dict",
"(",
"self",
",",
"dictionary",
",",
"_override",
"=",
"True",
",",
"_allow_undeclared",
"=",
"False",
")",
":",
"undeclared_keys",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"self",
".",
"_modules",
"[",
"'six'",
"]",
".",... | Loads the config with values from a dictionary instead of a file.
This is meant for testing and bin purposes and shouldn't be used in most
applications.
Args:
dictionary: The dictionary containing config keys/values to update.
_override: If True, new values will override previous values.
_allow_undeclared: If True, silently load undeclared keys, otherwise
warn and ignore the value. Typically used for loading config
files before declarations have been evaluated. | [
"Loads",
"the",
"config",
"with",
"values",
"from",
"a",
"dictionary",
"instead",
"of",
"a",
"file",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/conf.py#L405-L445 | train | 221,847 |
google/openhtf | openhtf/util/conf.py | Configuration._asdict | def _asdict(self):
"""Create a dictionary snapshot of the current config values."""
# Start with any default values we have, and override with loaded values,
# and then override with flag values.
retval = {key: self._declarations[key].default_value for
key in self._declarations if self._declarations[key].has_default}
retval.update(self._loaded_values)
# Only update keys that are declared so we don't allow injecting
# un-declared keys via commandline flags.
for key, value in self._modules['six'].iteritems(self._flag_values):
if key in self._declarations:
retval[key] = value
return retval | python | def _asdict(self):
"""Create a dictionary snapshot of the current config values."""
# Start with any default values we have, and override with loaded values,
# and then override with flag values.
retval = {key: self._declarations[key].default_value for
key in self._declarations if self._declarations[key].has_default}
retval.update(self._loaded_values)
# Only update keys that are declared so we don't allow injecting
# un-declared keys via commandline flags.
for key, value in self._modules['six'].iteritems(self._flag_values):
if key in self._declarations:
retval[key] = value
return retval | [
"def",
"_asdict",
"(",
"self",
")",
":",
"# Start with any default values we have, and override with loaded values,",
"# and then override with flag values.",
"retval",
"=",
"{",
"key",
":",
"self",
".",
"_declarations",
"[",
"key",
"]",
".",
"default_value",
"for",
"key"... | Create a dictionary snapshot of the current config values. | [
"Create",
"a",
"dictionary",
"snapshot",
"of",
"the",
"current",
"config",
"values",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/conf.py#L448-L460 | train | 221,848 |
google/openhtf | openhtf/util/conf.py | Configuration.help_text | def help_text(self):
"""Return a string with all config keys and their descriptions."""
result = []
for name in sorted(self._declarations.keys()):
result.append(name)
result.append('-' * len(name))
decl = self._declarations[name]
if decl.description:
result.append(decl.description.strip())
else:
result.append('(no description found)')
if decl.has_default:
result.append('')
quotes = '"' if type(decl.default_value) is str else ''
result.append(' default_value={quotes}{val}{quotes}'.format(
quotes=quotes, val=decl.default_value))
result.append('')
result.append('')
return '\n'.join(result) | python | def help_text(self):
"""Return a string with all config keys and their descriptions."""
result = []
for name in sorted(self._declarations.keys()):
result.append(name)
result.append('-' * len(name))
decl = self._declarations[name]
if decl.description:
result.append(decl.description.strip())
else:
result.append('(no description found)')
if decl.has_default:
result.append('')
quotes = '"' if type(decl.default_value) is str else ''
result.append(' default_value={quotes}{val}{quotes}'.format(
quotes=quotes, val=decl.default_value))
result.append('')
result.append('')
return '\n'.join(result) | [
"def",
"help_text",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"for",
"name",
"in",
"sorted",
"(",
"self",
".",
"_declarations",
".",
"keys",
"(",
")",
")",
":",
"result",
".",
"append",
"(",
"name",
")",
"result",
".",
"append",
"(",
"'-'",
... | Return a string with all config keys and their descriptions. | [
"Return",
"a",
"string",
"with",
"all",
"config",
"keys",
"and",
"their",
"descriptions",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/conf.py#L463-L481 | train | 221,849 |
google/openhtf | openhtf/util/conf.py | Configuration.save_and_restore | def save_and_restore(self, _func=None, **config_values):
"""Decorator for saving conf state and restoring it after a function.
This decorator is primarily for use in tests, where conf keys may be updated
for individual test cases, but those values need to be reverted after the
test case is done.
Examples:
conf.declare('my_conf_key')
@conf.save_and_restore
def MyTestFunc():
conf.load(my_conf_key='baz')
SomeFuncUnderTestThatUsesMyConfKey()
conf.load(my_conf_key='foo')
MyTestFunc()
print conf.my_conf_key # Prints 'foo', *NOT* 'baz'
# Without the save_and_restore decorator, MyTestFunc() would have had the
# side effect of altering the conf value of 'my_conf_key' to 'baz'.
# Config keys can also be initialized for the context inline at decoration
# time. This is the same as setting them at the beginning of the
# function, but is a little clearer syntax if you know ahead of time what
# config keys and values you need to set.
@conf.save_and_restore(my_conf_key='baz')
def MyOtherTestFunc():
print conf.my_conf_key # Prints 'baz'
MyOtherTestFunc()
print conf.my_conf_key # Prints 'foo' again, for the same reason.
Args:
_func: The function to wrap. The returned wrapper will invoke the
function and restore the config to the state it was in at invocation.
**config_values: Config keys can be set inline at decoration time, see
examples. Note that config keys can't begin with underscore, so
there can be no name collision with _func.
Returns:
Wrapper to replace _func, as per Python decorator semantics.
"""
functools = self._modules['functools'] # pylint: disable=redefined-outer-name
if not _func:
return functools.partial(self.save_and_restore, **config_values)
@functools.wraps(_func)
def _saving_wrapper(*args, **kwargs):
saved_config = dict(self._loaded_values)
try:
self.load_from_dict(config_values)
return _func(*args, **kwargs)
finally:
self._loaded_values = saved_config # pylint: disable=attribute-defined-outside-init
return _saving_wrapper | python | def save_and_restore(self, _func=None, **config_values):
"""Decorator for saving conf state and restoring it after a function.
This decorator is primarily for use in tests, where conf keys may be updated
for individual test cases, but those values need to be reverted after the
test case is done.
Examples:
conf.declare('my_conf_key')
@conf.save_and_restore
def MyTestFunc():
conf.load(my_conf_key='baz')
SomeFuncUnderTestThatUsesMyConfKey()
conf.load(my_conf_key='foo')
MyTestFunc()
print conf.my_conf_key # Prints 'foo', *NOT* 'baz'
# Without the save_and_restore decorator, MyTestFunc() would have had the
# side effect of altering the conf value of 'my_conf_key' to 'baz'.
# Config keys can also be initialized for the context inline at decoration
# time. This is the same as setting them at the beginning of the
# function, but is a little clearer syntax if you know ahead of time what
# config keys and values you need to set.
@conf.save_and_restore(my_conf_key='baz')
def MyOtherTestFunc():
print conf.my_conf_key # Prints 'baz'
MyOtherTestFunc()
print conf.my_conf_key # Prints 'foo' again, for the same reason.
Args:
_func: The function to wrap. The returned wrapper will invoke the
function and restore the config to the state it was in at invocation.
**config_values: Config keys can be set inline at decoration time, see
examples. Note that config keys can't begin with underscore, so
there can be no name collision with _func.
Returns:
Wrapper to replace _func, as per Python decorator semantics.
"""
functools = self._modules['functools'] # pylint: disable=redefined-outer-name
if not _func:
return functools.partial(self.save_and_restore, **config_values)
@functools.wraps(_func)
def _saving_wrapper(*args, **kwargs):
saved_config = dict(self._loaded_values)
try:
self.load_from_dict(config_values)
return _func(*args, **kwargs)
finally:
self._loaded_values = saved_config # pylint: disable=attribute-defined-outside-init
return _saving_wrapper | [
"def",
"save_and_restore",
"(",
"self",
",",
"_func",
"=",
"None",
",",
"*",
"*",
"config_values",
")",
":",
"functools",
"=",
"self",
".",
"_modules",
"[",
"'functools'",
"]",
"# pylint: disable=redefined-outer-name",
"if",
"not",
"_func",
":",
"return",
"fun... | Decorator for saving conf state and restoring it after a function.
This decorator is primarily for use in tests, where conf keys may be updated
for individual test cases, but those values need to be reverted after the
test case is done.
Examples:
conf.declare('my_conf_key')
@conf.save_and_restore
def MyTestFunc():
conf.load(my_conf_key='baz')
SomeFuncUnderTestThatUsesMyConfKey()
conf.load(my_conf_key='foo')
MyTestFunc()
print conf.my_conf_key # Prints 'foo', *NOT* 'baz'
# Without the save_and_restore decorator, MyTestFunc() would have had the
# side effect of altering the conf value of 'my_conf_key' to 'baz'.
# Config keys can also be initialized for the context inline at decoration
# time. This is the same as setting them at the beginning of the
# function, but is a little clearer syntax if you know ahead of time what
# config keys and values you need to set.
@conf.save_and_restore(my_conf_key='baz')
def MyOtherTestFunc():
print conf.my_conf_key # Prints 'baz'
MyOtherTestFunc()
print conf.my_conf_key # Prints 'foo' again, for the same reason.
Args:
_func: The function to wrap. The returned wrapper will invoke the
function and restore the config to the state it was in at invocation.
**config_values: Config keys can be set inline at decoration time, see
examples. Note that config keys can't begin with underscore, so
there can be no name collision with _func.
Returns:
Wrapper to replace _func, as per Python decorator semantics. | [
"Decorator",
"for",
"saving",
"conf",
"state",
"and",
"restoring",
"it",
"after",
"a",
"function",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/conf.py#L483-L542 | train | 221,850 |
google/openhtf | openhtf/util/conf.py | Configuration.inject_positional_args | def inject_positional_args(self, method):
"""Decorator for injecting positional arguments from the configuration.
This decorator wraps the given method, so that any positional arguments are
passed with corresponding values from the configuration. The name of the
positional argument must match the configuration key.
Keyword arguments are *NEVER* modified, even if their names match
configuration keys. Avoid naming keyword args names that are also
configuration keys to avoid confusion.
Additional positional arguments may be used that do not appear in the
configuration, but those arguments *MUST* be specified as keyword arguments
upon invocation of the method. This is to avoid ambiguity in which
positional arguments are getting which values.
Args:
method: The method to wrap.
Returns:
A wrapper that, when invoked, will call the wrapped method, passing in
configuration values for positional arguments.
"""
inspect = self._modules['inspect']
argspec = inspect.getargspec(method)
# Index in argspec.args of the first keyword argument. This index is a
# negative number if there are any kwargs, or 0 if there are no kwargs.
keyword_arg_index = -1 * len(argspec.defaults or [])
arg_names = argspec.args[:keyword_arg_index or None]
kwarg_names = argspec.args[len(arg_names):]
functools = self._modules['functools'] # pylint: disable=redefined-outer-name
# Create the actual method wrapper, all we do is update kwargs. Note we
# don't pass any *args through because there can't be any - we've filled
# them all in with values from the configuration. Any positional args that
# are missing from the configuration *must* be explicitly specified as
# kwargs.
@functools.wraps(method)
def method_wrapper(**kwargs):
"""Wrapper that pulls values from openhtf.util.conf."""
# Check for keyword args with names that are in the config so we can warn.
for kwarg in kwarg_names:
if kwarg in self:
self._logger.warning('Keyword arg %s not set from configuration, but '
'is a configuration key', kwarg)
# Set positional args from configuration values.
final_kwargs = {name: self[name] for name in arg_names if name in self}
for overridden in set(kwargs) & set(final_kwargs):
self._logger.warning('Overriding configuration value for kwarg %s (%s) '
'with provided kwarg value: %s', overridden,
self[overridden], kwargs[overridden])
final_kwargs.update(kwargs)
if inspect.ismethod(method):
name = '%s.%s' % (method.__self__.__class__.__name__, method.__name__)
else:
name = method.__name__
self._logger.debug('Invoking %s with %s', name, final_kwargs)
return method(**final_kwargs)
# We have to check for a 'self' parameter explicitly because Python doesn't
# pass it as a keyword arg, it passes it as the first positional arg.
if argspec.args[0] == 'self':
@functools.wraps(method)
def self_wrapper(self, **kwargs): # pylint: disable=invalid-name
"""Wrapper that pulls values from openhtf.util.conf."""
kwargs['self'] = self
return method_wrapper(**kwargs)
return self_wrapper
return method_wrapper | python | def inject_positional_args(self, method):
"""Decorator for injecting positional arguments from the configuration.
This decorator wraps the given method, so that any positional arguments are
passed with corresponding values from the configuration. The name of the
positional argument must match the configuration key.
Keyword arguments are *NEVER* modified, even if their names match
configuration keys. Avoid naming keyword args names that are also
configuration keys to avoid confusion.
Additional positional arguments may be used that do not appear in the
configuration, but those arguments *MUST* be specified as keyword arguments
upon invocation of the method. This is to avoid ambiguity in which
positional arguments are getting which values.
Args:
method: The method to wrap.
Returns:
A wrapper that, when invoked, will call the wrapped method, passing in
configuration values for positional arguments.
"""
inspect = self._modules['inspect']
argspec = inspect.getargspec(method)
# Index in argspec.args of the first keyword argument. This index is a
# negative number if there are any kwargs, or 0 if there are no kwargs.
keyword_arg_index = -1 * len(argspec.defaults or [])
arg_names = argspec.args[:keyword_arg_index or None]
kwarg_names = argspec.args[len(arg_names):]
functools = self._modules['functools'] # pylint: disable=redefined-outer-name
# Create the actual method wrapper, all we do is update kwargs. Note we
# don't pass any *args through because there can't be any - we've filled
# them all in with values from the configuration. Any positional args that
# are missing from the configuration *must* be explicitly specified as
# kwargs.
@functools.wraps(method)
def method_wrapper(**kwargs):
"""Wrapper that pulls values from openhtf.util.conf."""
# Check for keyword args with names that are in the config so we can warn.
for kwarg in kwarg_names:
if kwarg in self:
self._logger.warning('Keyword arg %s not set from configuration, but '
'is a configuration key', kwarg)
# Set positional args from configuration values.
final_kwargs = {name: self[name] for name in arg_names if name in self}
for overridden in set(kwargs) & set(final_kwargs):
self._logger.warning('Overriding configuration value for kwarg %s (%s) '
'with provided kwarg value: %s', overridden,
self[overridden], kwargs[overridden])
final_kwargs.update(kwargs)
if inspect.ismethod(method):
name = '%s.%s' % (method.__self__.__class__.__name__, method.__name__)
else:
name = method.__name__
self._logger.debug('Invoking %s with %s', name, final_kwargs)
return method(**final_kwargs)
# We have to check for a 'self' parameter explicitly because Python doesn't
# pass it as a keyword arg, it passes it as the first positional arg.
if argspec.args[0] == 'self':
@functools.wraps(method)
def self_wrapper(self, **kwargs): # pylint: disable=invalid-name
"""Wrapper that pulls values from openhtf.util.conf."""
kwargs['self'] = self
return method_wrapper(**kwargs)
return self_wrapper
return method_wrapper | [
"def",
"inject_positional_args",
"(",
"self",
",",
"method",
")",
":",
"inspect",
"=",
"self",
".",
"_modules",
"[",
"'inspect'",
"]",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"method",
")",
"# Index in argspec.args of the first keyword argument. This index... | Decorator for injecting positional arguments from the configuration.
This decorator wraps the given method, so that any positional arguments are
passed with corresponding values from the configuration. The name of the
positional argument must match the configuration key.
Keyword arguments are *NEVER* modified, even if their names match
configuration keys. Avoid naming keyword args names that are also
configuration keys to avoid confusion.
Additional positional arguments may be used that do not appear in the
configuration, but those arguments *MUST* be specified as keyword arguments
upon invocation of the method. This is to avoid ambiguity in which
positional arguments are getting which values.
Args:
method: The method to wrap.
Returns:
A wrapper that, when invoked, will call the wrapped method, passing in
configuration values for positional arguments. | [
"Decorator",
"for",
"injecting",
"positional",
"arguments",
"from",
"the",
"configuration",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/conf.py#L544-L616 | train | 221,851 |
google/openhtf | openhtf/util/functions.py | call_once | def call_once(func):
"""Decorate a function to only allow it to be called once.
Note that it doesn't make sense to only call a function once if it takes
arguments (use @functools.lru_cache for that sort of thing), so this only
works on callables that take no args.
"""
argspec = inspect.getargspec(func)
if argspec.args or argspec.varargs or argspec.keywords:
raise ValueError('Can only decorate functions with no args', func, argspec)
@functools.wraps(func)
def _wrapper():
# If we haven't been called yet, actually invoke func and save the result.
if not _wrapper.HasRun():
_wrapper.MarkAsRun()
_wrapper.return_value = func()
return _wrapper.return_value
_wrapper.has_run = False
_wrapper.HasRun = lambda: _wrapper.has_run
_wrapper.MarkAsRun = lambda: setattr(_wrapper, 'has_run', True)
return _wrapper | python | def call_once(func):
"""Decorate a function to only allow it to be called once.
Note that it doesn't make sense to only call a function once if it takes
arguments (use @functools.lru_cache for that sort of thing), so this only
works on callables that take no args.
"""
argspec = inspect.getargspec(func)
if argspec.args or argspec.varargs or argspec.keywords:
raise ValueError('Can only decorate functions with no args', func, argspec)
@functools.wraps(func)
def _wrapper():
# If we haven't been called yet, actually invoke func and save the result.
if not _wrapper.HasRun():
_wrapper.MarkAsRun()
_wrapper.return_value = func()
return _wrapper.return_value
_wrapper.has_run = False
_wrapper.HasRun = lambda: _wrapper.has_run
_wrapper.MarkAsRun = lambda: setattr(_wrapper, 'has_run', True)
return _wrapper | [
"def",
"call_once",
"(",
"func",
")",
":",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
")",
"if",
"argspec",
".",
"args",
"or",
"argspec",
".",
"varargs",
"or",
"argspec",
".",
"keywords",
":",
"raise",
"ValueError",
"(",
"'Can only decorate... | Decorate a function to only allow it to be called once.
Note that it doesn't make sense to only call a function once if it takes
arguments (use @functools.lru_cache for that sort of thing), so this only
works on callables that take no args. | [
"Decorate",
"a",
"function",
"to",
"only",
"allow",
"it",
"to",
"be",
"called",
"once",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/functions.py#L23-L45 | train | 221,852 |
google/openhtf | openhtf/util/functions.py | call_at_most_every | def call_at_most_every(seconds, count=1):
"""Call the decorated function at most count times every seconds seconds.
The decorated function will sleep to ensure that at most count invocations
occur within any 'seconds' second window.
"""
def decorator(func):
try:
call_history = getattr(func, '_call_history')
except AttributeError:
call_history = collections.deque(maxlen=count)
setattr(func, '_call_history', call_history)
@functools.wraps(func)
def _wrapper(*args, **kwargs):
current_time = time.time()
window_count = sum(ts > current_time - seconds for ts in call_history)
if window_count >= count:
# We need to sleep until the relevant call is outside the window. This
# should only ever be the the first entry in call_history, but if we
# somehow ended up with extra calls in the window, this recovers.
time.sleep(call_history[window_count - count] - current_time + seconds)
# Append this call, deque will automatically trim old calls using maxlen.
call_history.append(time.time())
return func(*args, **kwargs)
return _wrapper
return decorator | python | def call_at_most_every(seconds, count=1):
"""Call the decorated function at most count times every seconds seconds.
The decorated function will sleep to ensure that at most count invocations
occur within any 'seconds' second window.
"""
def decorator(func):
try:
call_history = getattr(func, '_call_history')
except AttributeError:
call_history = collections.deque(maxlen=count)
setattr(func, '_call_history', call_history)
@functools.wraps(func)
def _wrapper(*args, **kwargs):
current_time = time.time()
window_count = sum(ts > current_time - seconds for ts in call_history)
if window_count >= count:
# We need to sleep until the relevant call is outside the window. This
# should only ever be the the first entry in call_history, but if we
# somehow ended up with extra calls in the window, this recovers.
time.sleep(call_history[window_count - count] - current_time + seconds)
# Append this call, deque will automatically trim old calls using maxlen.
call_history.append(time.time())
return func(*args, **kwargs)
return _wrapper
return decorator | [
"def",
"call_at_most_every",
"(",
"seconds",
",",
"count",
"=",
"1",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"try",
":",
"call_history",
"=",
"getattr",
"(",
"func",
",",
"'_call_history'",
")",
"except",
"AttributeError",
":",
"call_history",
... | Call the decorated function at most count times every seconds seconds.
The decorated function will sleep to ensure that at most count invocations
occur within any 'seconds' second window. | [
"Call",
"the",
"decorated",
"function",
"at",
"most",
"count",
"times",
"every",
"seconds",
"seconds",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/functions.py#L47-L73 | train | 221,853 |
google/openhtf | openhtf/plugs/usb/__init__.py | _open_usb_handle | def _open_usb_handle(serial_number=None, **kwargs):
"""Open a UsbHandle subclass, based on configuration.
If configuration 'remote_usb' is set, use it to connect to remote usb,
otherwise attempt to connect locally.'remote_usb' is set to usb type,
EtherSync or other.
Example of Cambrionix unit in config:
remote_usb: ethersync
ethersync:
mac_addr: 78:a5:04:ca:91:66
plug_port: 5
Args:
serial_number: Optional serial number to connect to.
**kwargs: Arguments to pass to respective handle's Open() method.
Returns:
Instance of UsbHandle.
"""
init_dependent_flags()
remote_usb = conf.remote_usb
if remote_usb:
if remote_usb.strip() == 'ethersync':
device = conf.ethersync
try:
mac_addr = device['mac_addr']
port = device['plug_port']
except (KeyError, TypeError):
raise ValueError('Ethersync needs mac_addr and plug_port to be set')
else:
ethersync = cambrionix.EtherSync(mac_addr)
serial_number = ethersync.get_usb_serial(port)
return local_usb.LibUsbHandle.open(serial_number=serial_number, **kwargs) | python | def _open_usb_handle(serial_number=None, **kwargs):
"""Open a UsbHandle subclass, based on configuration.
If configuration 'remote_usb' is set, use it to connect to remote usb,
otherwise attempt to connect locally.'remote_usb' is set to usb type,
EtherSync or other.
Example of Cambrionix unit in config:
remote_usb: ethersync
ethersync:
mac_addr: 78:a5:04:ca:91:66
plug_port: 5
Args:
serial_number: Optional serial number to connect to.
**kwargs: Arguments to pass to respective handle's Open() method.
Returns:
Instance of UsbHandle.
"""
init_dependent_flags()
remote_usb = conf.remote_usb
if remote_usb:
if remote_usb.strip() == 'ethersync':
device = conf.ethersync
try:
mac_addr = device['mac_addr']
port = device['plug_port']
except (KeyError, TypeError):
raise ValueError('Ethersync needs mac_addr and plug_port to be set')
else:
ethersync = cambrionix.EtherSync(mac_addr)
serial_number = ethersync.get_usb_serial(port)
return local_usb.LibUsbHandle.open(serial_number=serial_number, **kwargs) | [
"def",
"_open_usb_handle",
"(",
"serial_number",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"init_dependent_flags",
"(",
")",
"remote_usb",
"=",
"conf",
".",
"remote_usb",
"if",
"remote_usb",
":",
"if",
"remote_usb",
".",
"strip",
"(",
")",
"==",
"'et... | Open a UsbHandle subclass, based on configuration.
If configuration 'remote_usb' is set, use it to connect to remote usb,
otherwise attempt to connect locally.'remote_usb' is set to usb type,
EtherSync or other.
Example of Cambrionix unit in config:
remote_usb: ethersync
ethersync:
mac_addr: 78:a5:04:ca:91:66
plug_port: 5
Args:
serial_number: Optional serial number to connect to.
**kwargs: Arguments to pass to respective handle's Open() method.
Returns:
Instance of UsbHandle. | [
"Open",
"a",
"UsbHandle",
"subclass",
"based",
"on",
"configuration",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/__init__.py#L62-L96 | train | 221,854 |
google/openhtf | openhtf/plugs/usb/__init__.py | AndroidTriggers._try_open | def _try_open(cls):
"""Try to open a USB handle."""
handle = None
for usb_cls, subcls, protocol in [(adb_device.CLASS,
adb_device.SUBCLASS,
adb_device.PROTOCOL),
(fastboot_device.CLASS,
fastboot_device.SUBCLASS,
fastboot_device.PROTOCOL)]:
try:
handle = local_usb.LibUsbHandle.open(
serial_number=cls.serial_number,
interface_class=usb_cls,
interface_subclass=subcls,
interface_protocol=protocol)
cls.serial_number = handle.serial_number
return True
except usb_exceptions.DeviceNotFoundError:
pass
except usb_exceptions.MultipleInterfacesFoundError:
_LOG.warning('Multiple Android devices found, ignoring!')
finally:
if handle:
handle.close()
return False | python | def _try_open(cls):
"""Try to open a USB handle."""
handle = None
for usb_cls, subcls, protocol in [(adb_device.CLASS,
adb_device.SUBCLASS,
adb_device.PROTOCOL),
(fastboot_device.CLASS,
fastboot_device.SUBCLASS,
fastboot_device.PROTOCOL)]:
try:
handle = local_usb.LibUsbHandle.open(
serial_number=cls.serial_number,
interface_class=usb_cls,
interface_subclass=subcls,
interface_protocol=protocol)
cls.serial_number = handle.serial_number
return True
except usb_exceptions.DeviceNotFoundError:
pass
except usb_exceptions.MultipleInterfacesFoundError:
_LOG.warning('Multiple Android devices found, ignoring!')
finally:
if handle:
handle.close()
return False | [
"def",
"_try_open",
"(",
"cls",
")",
":",
"handle",
"=",
"None",
"for",
"usb_cls",
",",
"subcls",
",",
"protocol",
"in",
"[",
"(",
"adb_device",
".",
"CLASS",
",",
"adb_device",
".",
"SUBCLASS",
",",
"adb_device",
".",
"PROTOCOL",
")",
",",
"(",
"fastb... | Try to open a USB handle. | [
"Try",
"to",
"open",
"a",
"USB",
"handle",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/__init__.py#L164-L188 | train | 221,855 |
google/openhtf | openhtf/plugs/usb/fastboot_device.py | _retry_usb_function | def _retry_usb_function(count, func, *args, **kwargs):
"""Helper function to retry USB."""
helper = timeouts.RetryHelper(count)
while True:
try:
return func(*args, **kwargs)
except usb_exceptions.CommonUsbError:
if not helper.retry_if_possible():
raise
time.sleep(0.1)
else:
break | python | def _retry_usb_function(count, func, *args, **kwargs):
"""Helper function to retry USB."""
helper = timeouts.RetryHelper(count)
while True:
try:
return func(*args, **kwargs)
except usb_exceptions.CommonUsbError:
if not helper.retry_if_possible():
raise
time.sleep(0.1)
else:
break | [
"def",
"_retry_usb_function",
"(",
"count",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"helper",
"=",
"timeouts",
".",
"RetryHelper",
"(",
"count",
")",
"while",
"True",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",... | Helper function to retry USB. | [
"Helper",
"function",
"to",
"retry",
"USB",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/fastboot_device.py#L112-L123 | train | 221,856 |
google/openhtf | openhtf/plugs/usb/fastboot_device.py | FastbootDevice.get_boot_config | def get_boot_config(self, name, info_cb=None):
"""Get bootconfig, either as full dict or specific value for key."""
result = {}
def default_info_cb(msg):
"""Default Info CB."""
if not msg.message:
return
key, value = msg.message.split(':', 1)
result[key.strip()] = value.strip()
info_cb = info_cb or default_info_cb
final_result = self.oem('bootconfig %s' % name, info_cb=info_cb)
# Return INFO messages before the final OKAY message.
if name in result:
return result[name]
return final_result | python | def get_boot_config(self, name, info_cb=None):
"""Get bootconfig, either as full dict or specific value for key."""
result = {}
def default_info_cb(msg):
"""Default Info CB."""
if not msg.message:
return
key, value = msg.message.split(':', 1)
result[key.strip()] = value.strip()
info_cb = info_cb or default_info_cb
final_result = self.oem('bootconfig %s' % name, info_cb=info_cb)
# Return INFO messages before the final OKAY message.
if name in result:
return result[name]
return final_result | [
"def",
"get_boot_config",
"(",
"self",
",",
"name",
",",
"info_cb",
"=",
"None",
")",
":",
"result",
"=",
"{",
"}",
"def",
"default_info_cb",
"(",
"msg",
")",
":",
"\"\"\"Default Info CB.\"\"\"",
"if",
"not",
"msg",
".",
"message",
":",
"return",
"key",
... | Get bootconfig, either as full dict or specific value for key. | [
"Get",
"bootconfig",
"either",
"as",
"full",
"dict",
"or",
"specific",
"value",
"for",
"key",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/fastboot_device.py#L49-L63 | train | 221,857 |
google/openhtf | openhtf/plugs/usb/local_usb.py | LibUsbHandle._device_to_sysfs_path | def _device_to_sysfs_path(device):
"""Convert device to corresponding sysfs path."""
return '%s-%s' % (
device.getBusNumber(),
'.'.join([str(item) for item in device.GetPortNumberList()])) | python | def _device_to_sysfs_path(device):
"""Convert device to corresponding sysfs path."""
return '%s-%s' % (
device.getBusNumber(),
'.'.join([str(item) for item in device.GetPortNumberList()])) | [
"def",
"_device_to_sysfs_path",
"(",
"device",
")",
":",
"return",
"'%s-%s'",
"%",
"(",
"device",
".",
"getBusNumber",
"(",
")",
",",
"'.'",
".",
"join",
"(",
"[",
"str",
"(",
"item",
")",
"for",
"item",
"in",
"device",
".",
"GetPortNumberList",
"(",
"... | Convert device to corresponding sysfs path. | [
"Convert",
"device",
"to",
"corresponding",
"sysfs",
"path",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/local_usb.py#L112-L116 | train | 221,858 |
google/openhtf | openhtf/plugs/usb/local_usb.py | LibUsbHandle.open | def open(cls, **kwargs):
"""See iter_open, but raises if multiple or no matches found."""
handle_iter = cls.iter_open(**kwargs)
try:
handle = six.next(handle_iter)
except StopIteration:
# No matching interface, raise.
raise usb_exceptions.DeviceNotFoundError(
'Open failed with args: %s', kwargs)
try:
multiple_handle = six.next(handle_iter)
except StopIteration:
# Exactly one matching device, return it.
return handle
# We have more than one device, close the ones we opened and bail.
handle.close()
multiple_handle.close()
raise usb_exceptions.MultipleInterfacesFoundError(kwargs) | python | def open(cls, **kwargs):
"""See iter_open, but raises if multiple or no matches found."""
handle_iter = cls.iter_open(**kwargs)
try:
handle = six.next(handle_iter)
except StopIteration:
# No matching interface, raise.
raise usb_exceptions.DeviceNotFoundError(
'Open failed with args: %s', kwargs)
try:
multiple_handle = six.next(handle_iter)
except StopIteration:
# Exactly one matching device, return it.
return handle
# We have more than one device, close the ones we opened and bail.
handle.close()
multiple_handle.close()
raise usb_exceptions.MultipleInterfacesFoundError(kwargs) | [
"def",
"open",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"handle_iter",
"=",
"cls",
".",
"iter_open",
"(",
"*",
"*",
"kwargs",
")",
"try",
":",
"handle",
"=",
"six",
".",
"next",
"(",
"handle_iter",
")",
"except",
"StopIteration",
":",
"# No mat... | See iter_open, but raises if multiple or no matches found. | [
"See",
"iter_open",
"but",
"raises",
"if",
"multiple",
"or",
"no",
"matches",
"found",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/local_usb.py#L158-L178 | train | 221,859 |
google/openhtf | openhtf/plugs/usb/local_usb.py | LibUsbHandle.iter_open | def iter_open(cls, name=None, interface_class=None, interface_subclass=None,
interface_protocol=None, serial_number=None, port_path=None,
default_timeout_ms=None):
"""Find and yield locally connected devices that match.
Note that devices are opened (and interfaces claimd) as they are yielded.
Any devices yielded must be Close()'d.
Args:
name: Name to give *all* returned handles, used for logging only.
interface_class: USB interface_class to match.
interface_subclass: USB interface_subclass to match.
interface_protocol: USB interface_protocol to match.
serial_number: USB serial_number to match.
port_path: USB Port path to match, like X-X.X.X
default_timeout_ms: Default timeout in milliseconds of reads/writes on
the handles yielded.
Yields:
UsbHandle instances that match any non-None args given.
Raises:
LibusbWrappingError: When a libusb call errors during open.
"""
ctx = usb1.USBContext()
try:
devices = ctx.getDeviceList(skip_on_error=True)
except libusb1.USBError as exception:
raise usb_exceptions.LibusbWrappingError(
exception, 'Open(name=%s, class=%s, subclass=%s, protocol=%s, '
'serial=%s, port=%s) failed', name, interface_class,
interface_subclass, interface_protocol, serial_number, port_path)
for device in devices:
try:
if (serial_number is not None and
device.getSerialNumber() != serial_number):
continue
if (port_path is not None and
cls._device_to_sysfs_path(device) != port_path):
continue
for setting in device.iterSettings():
if (interface_class is not None and
setting.getClass() != interface_class):
continue
if (interface_subclass is not None and
setting.getSubClass() != interface_subclass):
continue
if (interface_protocol is not None and
setting.getProtocol() != interface_protocol):
continue
yield cls(device, setting, name=name,
default_timeout_ms=default_timeout_ms)
except libusb1.USBError as exception:
if (exception.value !=
libusb1.libusb_error.forward_dict['LIBUSB_ERROR_ACCESS']):
raise | python | def iter_open(cls, name=None, interface_class=None, interface_subclass=None,
interface_protocol=None, serial_number=None, port_path=None,
default_timeout_ms=None):
"""Find and yield locally connected devices that match.
Note that devices are opened (and interfaces claimd) as they are yielded.
Any devices yielded must be Close()'d.
Args:
name: Name to give *all* returned handles, used for logging only.
interface_class: USB interface_class to match.
interface_subclass: USB interface_subclass to match.
interface_protocol: USB interface_protocol to match.
serial_number: USB serial_number to match.
port_path: USB Port path to match, like X-X.X.X
default_timeout_ms: Default timeout in milliseconds of reads/writes on
the handles yielded.
Yields:
UsbHandle instances that match any non-None args given.
Raises:
LibusbWrappingError: When a libusb call errors during open.
"""
ctx = usb1.USBContext()
try:
devices = ctx.getDeviceList(skip_on_error=True)
except libusb1.USBError as exception:
raise usb_exceptions.LibusbWrappingError(
exception, 'Open(name=%s, class=%s, subclass=%s, protocol=%s, '
'serial=%s, port=%s) failed', name, interface_class,
interface_subclass, interface_protocol, serial_number, port_path)
for device in devices:
try:
if (serial_number is not None and
device.getSerialNumber() != serial_number):
continue
if (port_path is not None and
cls._device_to_sysfs_path(device) != port_path):
continue
for setting in device.iterSettings():
if (interface_class is not None and
setting.getClass() != interface_class):
continue
if (interface_subclass is not None and
setting.getSubClass() != interface_subclass):
continue
if (interface_protocol is not None and
setting.getProtocol() != interface_protocol):
continue
yield cls(device, setting, name=name,
default_timeout_ms=default_timeout_ms)
except libusb1.USBError as exception:
if (exception.value !=
libusb1.libusb_error.forward_dict['LIBUSB_ERROR_ACCESS']):
raise | [
"def",
"iter_open",
"(",
"cls",
",",
"name",
"=",
"None",
",",
"interface_class",
"=",
"None",
",",
"interface_subclass",
"=",
"None",
",",
"interface_protocol",
"=",
"None",
",",
"serial_number",
"=",
"None",
",",
"port_path",
"=",
"None",
",",
"default_tim... | Find and yield locally connected devices that match.
Note that devices are opened (and interfaces claimd) as they are yielded.
Any devices yielded must be Close()'d.
Args:
name: Name to give *all* returned handles, used for logging only.
interface_class: USB interface_class to match.
interface_subclass: USB interface_subclass to match.
interface_protocol: USB interface_protocol to match.
serial_number: USB serial_number to match.
port_path: USB Port path to match, like X-X.X.X
default_timeout_ms: Default timeout in milliseconds of reads/writes on
the handles yielded.
Yields:
UsbHandle instances that match any non-None args given.
Raises:
LibusbWrappingError: When a libusb call errors during open. | [
"Find",
"and",
"yield",
"locally",
"connected",
"devices",
"that",
"match",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/local_usb.py#L182-L241 | train | 221,860 |
def hello_world(test, example, prompts):
  """Hello-world phase: prompt the operator and record widget measurements.

  Args:
    test: The running openhtf test instance (provides logger/measurements).
    example: Example plug instance; its increment() result is logged.
    prompts: User-input plug used to ask the operator for the widget type.

  Raises:
    Exception: If the operator answers with the magic string 'raise', to
        exercise the error-handling path.
  """
  test.logger.info('Hello World!')
  # The operator's answer becomes the widget_type measurement.
  response = prompts.prompt(
      'What\'s the widget type? (Hint: try `MyWidget` to PASS)',
      text_input=True)
  test.measurements.widget_type = response
  if test.measurements.widget_type == 'raise':
    raise Exception()
  test.measurements.widget_color = 'Black'
  test.measurements.widget_size = 3
  test.measurements.specified_as_args = 'Measurement args specified directly'
  test.logger.info('Plug value: %s', example.increment())
def set_measurements(test):
  """Test phase that sets a measurement.

  Records the three level_* measurements one at a time, pausing one second
  between updates, then reads level_all back out of the running test to
  demonstrate get_measurement().
  """
  # Set each level in order, sleeping between updates.
  for name, value in (('level_none', 0), ('level_some', 8), ('level_all', 9)):
    setattr(test.measurements, name, value)
    time.sleep(1)
  # Demonstrate reading a measurement back from the running test.
  level_all = test.get_measurement('level_all')
  assert level_all.value == 9
def pprint_diff(first, second, first_name='first', second_name='second'):
  """Compare the pprint representation of two objects and yield diff lines.

  Args:
    first: First object to compare.
    second: Second object to compare.
    first_name: Label used for `first` in the diff header.
    second_name: Label used for `second` in the diff header.

  Returns:
    A generator of unified-diff lines (no trailing newlines).
  """
  first_repr = pprint.pformat(first).splitlines()
  second_repr = pprint.pformat(second).splitlines()
  return difflib.unified_diff(
      first_repr, second_repr,
      fromfile=first_name, tofile=second_name, lineterm='')
def equals_log_diff(expected, actual, level=logging.ERROR):
  """Compare two string blobs, logging a diff if they don't match.

  Args:
    expected: Expected text blob.
    actual: Actual text blob.
    level: Logging level (e.g. logging.ERROR) used for the diff output.

  Returns:
    True if the blobs are equal, False otherwise.
  """
  if expected == actual:
    return True
  # Output the diff first.
  logging.log(level, '***** Data mismatch: *****')
  for line in difflib.unified_diff(
      expected.splitlines(), actual.splitlines(),
      fromfile='expected', tofile='actual', lineterm=''):
    logging.log(level, line)
  logging.log(level, '^^^^^ Data diff ^^^^^')
  # Bug fix: the original fell off the end and implicitly returned None on
  # mismatch; a function with an `equals_*` contract should return False.
  return False
def assert_records_equal_nonvolatile(first, second, volatile_fields, indent=0):
  """Compare two test_record tuples, ignoring any volatile fields.

  'Volatile' fields include any fields that are expected to differ between
  successive runs of the same test, mainly timestamps. All other fields
  are recursively compared.

  Args:
    first: First value (dict, namedtuple, iterable, mutablerecords Record,
        or leaf) to compare.
    second: Second value to compare against `first`.
    volatile_fields: Collection of dict keys skipped during comparison.
    indent: Indentation for mismatch logging (internal, used by recursion).

  Raises:
    AssertionError: If the records differ in any non-volatile field.
  """
  if isinstance(first, dict) and isinstance(second, dict):
    if set(first) != set(second):
      logging.error('%sMismatching keys:', ' ' * indent)
      logging.error('%s %s', ' ' * indent, list(first.keys()))
      logging.error('%s %s', ' ' * indent, list(second.keys()))
    assert set(first) == set(second)
    for key in first:
      if key in volatile_fields:
        continue
      try:
        assert_records_equal_nonvolatile(first[key], second[key],
                                         volatile_fields, indent + 2)
      except AssertionError:
        logging.error('%sKey: %s ^', ' ' * indent, key)
        raise
  elif hasattr(first, '_asdict') and hasattr(second, '_asdict'):
    # Compare namedtuples as dicts so we get more useful output.
    assert_records_equal_nonvolatile(first._asdict(), second._asdict(),
                                     volatile_fields, indent)
  elif (hasattr(first, '__iter__') and hasattr(second, '__iter__') and
        not isinstance(first, (str, bytes))):
    # Bug fix: strings are iterable in Python 3, so without the isinstance
    # guard two string leaves would recurse infinitely (each character is
    # itself an iterable string). Strings fall through to the leaf compare.
    # Bug fix: itertools.izip is Python 2 only; the builtin zip works on both.
    for idx, (fir, sec) in enumerate(zip(first, second)):
      try:
        assert_records_equal_nonvolatile(fir, sec, volatile_fields, indent + 2)
      except AssertionError:
        logging.error('%sIndex: %s ^', ' ' * indent, idx)
        raise
  elif (isinstance(first, records.RecordClass) and
        isinstance(second, records.RecordClass)):
    # Expand mutablerecords instances into dicts of their slots.
    assert_records_equal_nonvolatile(
        {slot: getattr(first, slot) for slot in first.__slots__},
        {slot: getattr(second, slot) for slot in second.__slots__},
        volatile_fields, indent)
  elif first != second:
    logging.error('%sRaw: "%s" != "%s"', ' ' * indent, first, second)
    assert first == second
def convert_to_base_types(obj, ignore_keys=tuple(), tuple_type=tuple,
                          json_safe=True):
  """Recursively convert objects into base types.

  This is used to convert some special types of objects used internally into
  base types for more friendly output via mechanisms such as JSON. It is used
  for sending internal objects via the network and outputting test records.
  Specifically, the conversions that are performed:

  - If an object has an as_base_types() method, immediately return the result
    without any recursion; this can be used with caching in the object to
    prevent unnecessary conversions.
  - If an object has an _asdict() method, use that to convert it to a dict and
    recursively convert its contents.
  - mutablerecords Record instances are converted to dicts that map attribute
    name to value. Optional attributes with a value of None are skipped.
  - Enum instances are converted to strings via their .name attribute.
  - Real and integral numbers are converted to built-in types.
  - Byte and unicode strings are left alone (instances of six.string_types).
  - Other non-None values are converted to strings via str().

  The return value contains only the Python built-in types: dict, list, tuple,
  str, unicode, int, float, bool, and NoneType (unless tuple_type is set to
  something else). If tuples should be converted to lists (e.g. for an
  encoding that does not differentiate between the two), pass
  'tuple_type=list' as an argument.

  If `json_safe` is True, then the float 'inf', '-inf', and 'nan' values will
  be converted to strings. This ensures that the returned dictionary can be
  passed to json.dumps to create valid JSON. Otherwise, json.dumps may return
  values such as NaN which are not valid JSON.
  """
  # Because it's *really* annoying to pass a single string accidentally.
  assert not isinstance(ignore_keys, six.string_types), 'Pass a real iterable!'

  if hasattr(obj, 'as_base_types'):
    return obj.as_base_types()
  if hasattr(obj, '_asdict'):
    obj = obj._asdict()
  elif isinstance(obj, records.RecordClass):
    obj = {attr: getattr(obj, attr)
           for attr in type(obj).all_attribute_names
           if (getattr(obj, attr, None) is not None or
               attr in type(obj).required_attributes)}
  elif isinstance(obj, Enum):
    obj = obj.name

  if type(obj) in PASSTHROUGH_TYPES:
    return obj

  # Recursively convert values in dicts, lists, and tuples.
  if isinstance(obj, dict):
    # Bug fix: json_safe is now forwarded into dict keys and values; it was
    # previously dropped, so json_safe=False was ignored inside dicts.
    return {convert_to_base_types(k, ignore_keys, tuple_type, json_safe):
            convert_to_base_types(v, ignore_keys, tuple_type, json_safe)
            for k, v in six.iteritems(obj) if k not in ignore_keys}
  elif isinstance(obj, list):
    return [convert_to_base_types(val, ignore_keys, tuple_type, json_safe)
            for val in obj]
  elif isinstance(obj, tuple):
    return tuple_type(
        convert_to_base_types(value, ignore_keys, tuple_type, json_safe)
        for value in obj)
  # Convert numeric types (e.g. numpy ints and floats) into built-in types.
  elif isinstance(obj, numbers.Integral):
    # Bug fix: `long` does not exist in Python 3; `int` covers both (Python 2
    # promotes int to long automatically when needed).
    return int(obj)
  elif isinstance(obj, numbers.Real):
    as_float = float(obj)
    if json_safe and (math.isinf(as_float) or math.isnan(as_float)):
      return str(as_float)
    return as_float

  # Convert all other types to strings.
  try:
    return str(obj)
  except Exception:  # Narrowed from a bare except; still re-raised below.
    logging.warning('Problem casting object of type %s to str.', type(obj))
    raise
def total_size(obj):
  """Returns the approximate total memory footprint of an object.

  Performs a depth-first traversal of objects reachable from `obj`, summing
  their sys.getsizeof() sizes. Each distinct object is counted once; repeat
  references are approximated by the size of a pointer.

  Args:
    obj: Any object.

  Returns:
    Approximate size in bytes (interpreter/platform dependent).
  """
  # Bug fix: `collections.Iterable` was deprecated in Python 3.3 and removed
  # in Python 3.10 (the resulting AttributeError was silently swallowed by
  # the broad except below, returning only a pointer size for iterables).
  try:
    from collections.abc import Iterable  # Python 3.3+
  except ImportError:
    from collections import Iterable  # Python 2

  seen = set()

  def sizeof(current_obj):
    """Best-effort size; unexpected failures count as a single reference."""
    try:
      return _sizeof(current_obj)
    except Exception:  # pylint: disable=broad-except
      # Not sure what just happened, but let's assume it's a reference.
      return struct.calcsize('P')

  def _sizeof(current_obj):
    """Do a depth-first acyclic traversal of all reachable objects."""
    if id(current_obj) in seen:
      # A rough approximation of the size cost of an additional reference.
      return struct.calcsize('P')
    seen.add(id(current_obj))
    size = sys.getsizeof(current_obj)
    if isinstance(current_obj, dict):
      size += sum(map(sizeof, itertools.chain.from_iterable(
          six.iteritems(current_obj))))
    elif (isinstance(current_obj, Iterable) and
          not isinstance(current_obj, six.string_types)):
      size += sum(sizeof(item) for item in current_obj)
    elif isinstance(current_obj, records.RecordClass):
      size += sum(sizeof(getattr(current_obj, attr))
                  for attr in current_obj.__slots__)
    return size

  return sizeof(obj)
def open_output_file(self, test_record):
  """Open an output file for the given test record, based on the pattern.

  Yields a writable file-like object. If `filename_pattern` is a string or
  callable, it is formatted against a pruned copy of the record and opened
  (and closed when the context exits); if it is already a file-like object,
  it is yielded directly and left open.

  Raises:
    ValueError: If `filename_pattern` is none of the supported kinds.
  """
  # Ignore keys for the log filename to not convert larger data structures.
  record_dict = data.convert_to_base_types(
      test_record, ignore_keys=('code_info', 'phases', 'log_records'))
  pattern = self.filename_pattern
  is_formattable = isinstance(pattern, six.string_types) or callable(pattern)
  if is_formattable:
    filename = util.format_string(pattern, record_dict)
    output_file = self.open_file(filename)
    try:
      yield output_file
    finally:
      output_file.close()
  elif hasattr(pattern, 'write'):
    yield pattern
  else:
    raise ValueError(
        'filename_pattern must be string, callable, or File-like object')
def mutable_record_transform(cls):
  """Transform mutablerecords usage by updating the class's locals.

  For classes declared as `class Foo(mutablerecords.Record('Foo', [...],
  {...}))`, registers the required and optional attribute names in
  `cls.locals` so astroid/pylint can resolve attribute accesses.

  Args:
    cls: The astroid ClassDef node being transformed.

  Raises:
    SyntaxError: If the mutablerecords.Record(...) call is malformed.
  """
  if not (len(cls.bases) > 0
          and isinstance(cls.bases[0], astroid.Call)
          and cls.bases[0].func.as_string() == 'mutablerecords.Record'):
    return
  record_args = cls.bases[0].args
  try:
    # Add required attributes.
    # NOTE(review): required attributes are keyed by the astroid element node
    # itself, while the optional branch below keys by `.value` — confirm this
    # asymmetry is intended.
    if len(record_args) >= 2:
      for required in record_args[1].elts:
        cls.locals[required] = [None]
    # Add optional attributes.
    if len(record_args) >= 3:
      for key, _ in record_args[2].items:
        cls.locals[key.value] = [None]
  except Exception:
    # Bug fix: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit before re-raising as SyntaxError.
    raise SyntaxError('Invalid mutablerecords syntax')
def _log_every_n_to_logger(n, logger, level, message, *args):  # pylint: disable=invalid-name
  """Logs the given message every n calls to a logger.

  Args:
    n: Number of calls before logging.
    logger: The logger to which to log.
    level: The logging level (e.g. logging.INFO).
    message: A message to log.
    *args: Any format args for the message.

  Returns:
    A method that logs and returns True every n calls.
  """
  logger = logger or logging.getLogger()
  call_count = [0]  # Mutable cell so the closure can update it.

  def _maybe_log():  # pylint: disable=missing-docstring
    # Returns False for n consecutive calls, then logs and returns True once.
    call_count[0] += 1
    if call_count[0] % (n + 1):
      return False
    logger.log(level, message, *args)
    return True

  return _maybe_log
google/openhtf | openhtf/util/__init__.py | log_every_n | def log_every_n(n, level, message, *args): # pylint: disable=invalid-name
"""Logs a message every n calls. See _log_every_n_to_logger."""
return _log_every_n_to_logger(n, None, level, message, *args) | python | def log_every_n(n, level, message, *args): # pylint: disable=invalid-name
"""Logs a message every n calls. See _log_every_n_to_logger."""
return _log_every_n_to_logger(n, None, level, message, *args) | [
"def",
"log_every_n",
"(",
"n",
",",
"level",
",",
"message",
",",
"*",
"args",
")",
":",
"# pylint: disable=invalid-name",
"return",
"_log_every_n_to_logger",
"(",
"n",
",",
"None",
",",
"level",
",",
"message",
",",
"*",
"args",
")"
] | Logs a message every n calls. See _log_every_n_to_logger. | [
"Logs",
"a",
"message",
"every",
"n",
"calls",
".",
"See",
"_log_every_n_to_logger",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/__init__.py#L52-L54 | train | 221,871 |
google/openhtf | openhtf/util/__init__.py | partial_format | def partial_format(target, **kwargs):
"""Formats a string without requiring all values to be present.
This function allows substitutions to be gradually made in several steps
rather than all at once. Similar to string.Template.safe_substitute.
"""
output = target[:]
for tag, var in re.findall(r'(\{(.*?)\})', output):
root = var.split('.')[0] # dot notation
root = root.split('[')[0] # dict notation
if root in kwargs:
output = output.replace(tag, tag.format(**{root: kwargs[root]}))
return output | python | def partial_format(target, **kwargs):
"""Formats a string without requiring all values to be present.
This function allows substitutions to be gradually made in several steps
rather than all at once. Similar to string.Template.safe_substitute.
"""
output = target[:]
for tag, var in re.findall(r'(\{(.*?)\})', output):
root = var.split('.')[0] # dot notation
root = root.split('[')[0] # dict notation
if root in kwargs:
output = output.replace(tag, tag.format(**{root: kwargs[root]}))
return output | [
"def",
"partial_format",
"(",
"target",
",",
"*",
"*",
"kwargs",
")",
":",
"output",
"=",
"target",
"[",
":",
"]",
"for",
"tag",
",",
"var",
"in",
"re",
".",
"findall",
"(",
"r'(\\{(.*?)\\})'",
",",
"output",
")",
":",
"root",
"=",
"var",
".",
"spl... | Formats a string without requiring all values to be present.
This function allows substitutions to be gradually made in several steps
rather than all at once. Similar to string.Template.safe_substitute. | [
"Formats",
"a",
"string",
"without",
"requiring",
"all",
"values",
"to",
"be",
"present",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/__init__.py#L96-L110 | train | 221,872 |
google/openhtf | openhtf/util/__init__.py | SubscribableStateMixin.asdict_with_event | def asdict_with_event(self):
"""Get a dict representation of this object and an update event.
Returns:
state: Dict representation of this object.
update_event: An event that is guaranteed to be set if an update has been
triggered since the returned dict was generated.
"""
event = threading.Event()
with self._lock:
self._update_events.add(event)
return self._asdict(), event | python | def asdict_with_event(self):
"""Get a dict representation of this object and an update event.
Returns:
state: Dict representation of this object.
update_event: An event that is guaranteed to be set if an update has been
triggered since the returned dict was generated.
"""
event = threading.Event()
with self._lock:
self._update_events.add(event)
return self._asdict(), event | [
"def",
"asdict_with_event",
"(",
"self",
")",
":",
"event",
"=",
"threading",
".",
"Event",
"(",
")",
"with",
"self",
".",
"_lock",
":",
"self",
".",
"_update_events",
".",
"add",
"(",
"event",
")",
"return",
"self",
".",
"_asdict",
"(",
")",
",",
"e... | Get a dict representation of this object and an update event.
Returns:
state: Dict representation of this object.
update_event: An event that is guaranteed to be set if an update has been
triggered since the returned dict was generated. | [
"Get",
"a",
"dict",
"representation",
"of",
"this",
"object",
"and",
"an",
"update",
"event",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/__init__.py#L158-L169 | train | 221,873 |
google/openhtf | openhtf/util/__init__.py | SubscribableStateMixin.notify_update | def notify_update(self):
"""Notify any update events that there was an update."""
with self._lock:
for event in self._update_events:
event.set()
self._update_events.clear() | python | def notify_update(self):
"""Notify any update events that there was an update."""
with self._lock:
for event in self._update_events:
event.set()
self._update_events.clear() | [
"def",
"notify_update",
"(",
"self",
")",
":",
"with",
"self",
".",
"_lock",
":",
"for",
"event",
"in",
"self",
".",
"_update_events",
":",
"event",
".",
"set",
"(",
")",
"self",
".",
"_update_events",
".",
"clear",
"(",
")"
] | Notify any update events that there was an update. | [
"Notify",
"any",
"update",
"events",
"that",
"there",
"was",
"an",
"update",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/__init__.py#L171-L176 | train | 221,874 |
google/openhtf | openhtf/output/servers/web_gui_server.py | bind_port | def bind_port(requested_port):
"""Bind sockets to an available port, returning sockets and the bound port."""
sockets = tornado.netutil.bind_sockets(requested_port)
if requested_port != 0:
return sockets, requested_port
# Get the actual port number.
for s in sockets:
host, port = s.getsockname()[:2]
if host == '0.0.0.0':
return sockets, port
raise RuntimeError('Could not determine the bound port.') | python | def bind_port(requested_port):
"""Bind sockets to an available port, returning sockets and the bound port."""
sockets = tornado.netutil.bind_sockets(requested_port)
if requested_port != 0:
return sockets, requested_port
# Get the actual port number.
for s in sockets:
host, port = s.getsockname()[:2]
if host == '0.0.0.0':
return sockets, port
raise RuntimeError('Could not determine the bound port.') | [
"def",
"bind_port",
"(",
"requested_port",
")",
":",
"sockets",
"=",
"tornado",
".",
"netutil",
".",
"bind_sockets",
"(",
"requested_port",
")",
"if",
"requested_port",
"!=",
"0",
":",
"return",
"sockets",
",",
"requested_port",
"# Get the actual port number.",
"f... | Bind sockets to an available port, returning sockets and the bound port. | [
"Bind",
"sockets",
"to",
"an",
"available",
"port",
"returning",
"sockets",
"and",
"the",
"bound",
"port",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/servers/web_gui_server.py#L47-L60 | train | 221,875 |
google/openhtf | openhtf/util/timeouts.py | loop_until_timeout_or_valid | def loop_until_timeout_or_valid(timeout_s, function, validation_fn, sleep_s=1): # pylint: disable=invalid-name
"""Loops until the specified function returns valid or a timeout is reached.
Note: The function may return anything which, when passed to validation_fn,
evaluates to implicit True. This function will loop calling the function as
long as the result of validation_fn(function_result) returns something which
evaluates to False. We ensure function is called at least once regardless
of timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
validation_fn: The validation function called on the function result to
determine whether to keep looping.
sleep_s: The number of seconds to wait after calling the function.
Returns:
Whatever the function returned last.
"""
if timeout_s is None or not hasattr(timeout_s, 'has_expired'):
timeout_s = PolledTimeout(timeout_s)
while True:
# Calls the function at least once
result = function()
if validation_fn(result) or timeout_s.has_expired():
return result
time.sleep(sleep_s) | python | def loop_until_timeout_or_valid(timeout_s, function, validation_fn, sleep_s=1): # pylint: disable=invalid-name
"""Loops until the specified function returns valid or a timeout is reached.
Note: The function may return anything which, when passed to validation_fn,
evaluates to implicit True. This function will loop calling the function as
long as the result of validation_fn(function_result) returns something which
evaluates to False. We ensure function is called at least once regardless
of timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
validation_fn: The validation function called on the function result to
determine whether to keep looping.
sleep_s: The number of seconds to wait after calling the function.
Returns:
Whatever the function returned last.
"""
if timeout_s is None or not hasattr(timeout_s, 'has_expired'):
timeout_s = PolledTimeout(timeout_s)
while True:
# Calls the function at least once
result = function()
if validation_fn(result) or timeout_s.has_expired():
return result
time.sleep(sleep_s) | [
"def",
"loop_until_timeout_or_valid",
"(",
"timeout_s",
",",
"function",
",",
"validation_fn",
",",
"sleep_s",
"=",
"1",
")",
":",
"# pylint: disable=invalid-name",
"if",
"timeout_s",
"is",
"None",
"or",
"not",
"hasattr",
"(",
"timeout_s",
",",
"'has_expired'",
")... | Loops until the specified function returns valid or a timeout is reached.
Note: The function may return anything which, when passed to validation_fn,
evaluates to implicit True. This function will loop calling the function as
long as the result of validation_fn(function_result) returns something which
evaluates to False. We ensure function is called at least once regardless
of timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
validation_fn: The validation function called on the function result to
determine whether to keep looping.
sleep_s: The number of seconds to wait after calling the function.
Returns:
Whatever the function returned last. | [
"Loops",
"until",
"the",
"specified",
"function",
"returns",
"valid",
"or",
"a",
"timeout",
"is",
"reached",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L122-L151 | train | 221,876 |
google/openhtf | openhtf/util/timeouts.py | loop_until_timeout_or_true | def loop_until_timeout_or_true(timeout_s, function, sleep_s=1): # pylint: disable=invalid-name
"""Loops until the specified function returns True or a timeout is reached.
Note: The function may return anything which evaluates to implicit True. This
function will loop calling it as long as it continues to return something
which evaluates to False. We ensure this method is called at least once
regardless of timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
sleep_s: The number of seconds to wait after calling the function.
Returns:
Whatever the function returned last.
"""
return loop_until_timeout_or_valid(timeout_s, function, lambda x: x, sleep_s) | python | def loop_until_timeout_or_true(timeout_s, function, sleep_s=1): # pylint: disable=invalid-name
"""Loops until the specified function returns True or a timeout is reached.
Note: The function may return anything which evaluates to implicit True. This
function will loop calling it as long as it continues to return something
which evaluates to False. We ensure this method is called at least once
regardless of timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
sleep_s: The number of seconds to wait after calling the function.
Returns:
Whatever the function returned last.
"""
return loop_until_timeout_or_valid(timeout_s, function, lambda x: x, sleep_s) | [
"def",
"loop_until_timeout_or_true",
"(",
"timeout_s",
",",
"function",
",",
"sleep_s",
"=",
"1",
")",
":",
"# pylint: disable=invalid-name",
"return",
"loop_until_timeout_or_valid",
"(",
"timeout_s",
",",
"function",
",",
"lambda",
"x",
":",
"x",
",",
"sleep_s",
... | Loops until the specified function returns True or a timeout is reached.
Note: The function may return anything which evaluates to implicit True. This
function will loop calling it as long as it continues to return something
which evaluates to False. We ensure this method is called at least once
regardless of timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
sleep_s: The number of seconds to wait after calling the function.
Returns:
Whatever the function returned last. | [
"Loops",
"until",
"the",
"specified",
"function",
"returns",
"True",
"or",
"a",
"timeout",
"is",
"reached",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L154-L172 | train | 221,877 |
google/openhtf | openhtf/util/timeouts.py | loop_until_timeout_or_not_none | def loop_until_timeout_or_not_none(timeout_s, function, sleep_s=1): # pylint: disable=invalid-name
"""Loops until the specified function returns non-None or until a timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
sleep_s: The number of seconds to wait after calling the function.
Returns:
Whatever the function returned last.
"""
return loop_until_timeout_or_valid(
timeout_s, function, lambda x: x is not None, sleep_s) | python | def loop_until_timeout_or_not_none(timeout_s, function, sleep_s=1): # pylint: disable=invalid-name
"""Loops until the specified function returns non-None or until a timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
sleep_s: The number of seconds to wait after calling the function.
Returns:
Whatever the function returned last.
"""
return loop_until_timeout_or_valid(
timeout_s, function, lambda x: x is not None, sleep_s) | [
"def",
"loop_until_timeout_or_not_none",
"(",
"timeout_s",
",",
"function",
",",
"sleep_s",
"=",
"1",
")",
":",
"# pylint: disable=invalid-name",
"return",
"loop_until_timeout_or_valid",
"(",
"timeout_s",
",",
"function",
",",
"lambda",
"x",
":",
"x",
"is",
"not",
... | Loops until the specified function returns non-None or until a timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
sleep_s: The number of seconds to wait after calling the function.
Returns:
Whatever the function returned last. | [
"Loops",
"until",
"the",
"specified",
"function",
"returns",
"non",
"-",
"None",
"or",
"until",
"a",
"timeout",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L175-L189 | train | 221,878 |
google/openhtf | openhtf/util/timeouts.py | loop_until_true_else_raise | def loop_until_true_else_raise(timeout_s,
function,
invert=False,
message=None,
sleep_s=1):
"""Repeatedly call the given function until truthy, or raise on a timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
invert: If True, wait for the callable to return falsey instead of truthy.
message: Optional custom error message to use on a timeout.
sleep_s: Seconds to sleep between call attempts.
Returns:
The final return value of the function.
Raises:
RuntimeError if the timeout is reached before the function returns truthy.
"""
def validate(x):
return bool(x) != invert
result = loop_until_timeout_or_valid(timeout_s, function, validate, sleep_s=1)
if validate(result):
return result
if message is not None:
raise RuntimeError(message)
name = '(unknown)'
if hasattr(function, '__name__'):
name = function.__name__
elif (isinstance(function, functools.partial)
and hasattr(function.func, '__name__')):
name = function.func.__name__
raise RuntimeError(
'Function %s failed to return %s within %d seconds.'
% (name, 'falsey' if invert else 'truthy', timeout_s)) | python | def loop_until_true_else_raise(timeout_s,
function,
invert=False,
message=None,
sleep_s=1):
"""Repeatedly call the given function until truthy, or raise on a timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
invert: If True, wait for the callable to return falsey instead of truthy.
message: Optional custom error message to use on a timeout.
sleep_s: Seconds to sleep between call attempts.
Returns:
The final return value of the function.
Raises:
RuntimeError if the timeout is reached before the function returns truthy.
"""
def validate(x):
return bool(x) != invert
result = loop_until_timeout_or_valid(timeout_s, function, validate, sleep_s=1)
if validate(result):
return result
if message is not None:
raise RuntimeError(message)
name = '(unknown)'
if hasattr(function, '__name__'):
name = function.__name__
elif (isinstance(function, functools.partial)
and hasattr(function.func, '__name__')):
name = function.func.__name__
raise RuntimeError(
'Function %s failed to return %s within %d seconds.'
% (name, 'falsey' if invert else 'truthy', timeout_s)) | [
"def",
"loop_until_true_else_raise",
"(",
"timeout_s",
",",
"function",
",",
"invert",
"=",
"False",
",",
"message",
"=",
"None",
",",
"sleep_s",
"=",
"1",
")",
":",
"def",
"validate",
"(",
"x",
")",
":",
"return",
"bool",
"(",
"x",
")",
"!=",
"invert"... | Repeatedly call the given function until truthy, or raise on a timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
invert: If True, wait for the callable to return falsey instead of truthy.
message: Optional custom error message to use on a timeout.
sleep_s: Seconds to sleep between call attempts.
Returns:
The final return value of the function.
Raises:
RuntimeError if the timeout is reached before the function returns truthy. | [
"Repeatedly",
"call",
"the",
"given",
"function",
"until",
"truthy",
"or",
"raise",
"on",
"a",
"timeout",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L192-L232 | train | 221,879 |
google/openhtf | openhtf/util/timeouts.py | execute_forever | def execute_forever(method, interval_s): # pylint: disable=invalid-name
"""Executes a method forever at the specified interval.
Args:
method: The callable to execute.
interval_s: The number of seconds to start the execution after each method
finishes.
Returns:
An Interval object.
"""
interval = Interval(method)
interval.start(interval_s)
return interval | python | def execute_forever(method, interval_s): # pylint: disable=invalid-name
"""Executes a method forever at the specified interval.
Args:
method: The callable to execute.
interval_s: The number of seconds to start the execution after each method
finishes.
Returns:
An Interval object.
"""
interval = Interval(method)
interval.start(interval_s)
return interval | [
"def",
"execute_forever",
"(",
"method",
",",
"interval_s",
")",
":",
"# pylint: disable=invalid-name",
"interval",
"=",
"Interval",
"(",
"method",
")",
"interval",
".",
"start",
"(",
"interval_s",
")",
"return",
"interval"
] | Executes a method forever at the specified interval.
Args:
method: The callable to execute.
interval_s: The number of seconds to start the execution after each method
finishes.
Returns:
An Interval object. | [
"Executes",
"a",
"method",
"forever",
"at",
"the",
"specified",
"interval",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L316-L328 | train | 221,880 |
google/openhtf | openhtf/util/timeouts.py | execute_until_false | def execute_until_false(method, interval_s): # pylint: disable=invalid-name
"""Executes a method forever until the method returns a false value.
Args:
method: The callable to execute.
interval_s: The number of seconds to start the execution after each method
finishes.
Returns:
An Interval object.
"""
interval = Interval(method, stop_if_false=True)
interval.start(interval_s)
return interval | python | def execute_until_false(method, interval_s): # pylint: disable=invalid-name
"""Executes a method forever until the method returns a false value.
Args:
method: The callable to execute.
interval_s: The number of seconds to start the execution after each method
finishes.
Returns:
An Interval object.
"""
interval = Interval(method, stop_if_false=True)
interval.start(interval_s)
return interval | [
"def",
"execute_until_false",
"(",
"method",
",",
"interval_s",
")",
":",
"# pylint: disable=invalid-name",
"interval",
"=",
"Interval",
"(",
"method",
",",
"stop_if_false",
"=",
"True",
")",
"interval",
".",
"start",
"(",
"interval_s",
")",
"return",
"interval"
] | Executes a method forever until the method returns a false value.
Args:
method: The callable to execute.
interval_s: The number of seconds to start the execution after each method
finishes.
Returns:
An Interval object. | [
"Executes",
"a",
"method",
"forever",
"until",
"the",
"method",
"returns",
"a",
"false",
"value",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L331-L343 | train | 221,881 |
google/openhtf | openhtf/util/timeouts.py | retry_until_true_or_limit_reached | def retry_until_true_or_limit_reached(method, limit, sleep_s=1,
catch_exceptions=()):
"""Executes a method until the retry limit is hit or True is returned."""
return retry_until_valid_or_limit_reached(
method, limit, lambda x: x, sleep_s, catch_exceptions) | python | def retry_until_true_or_limit_reached(method, limit, sleep_s=1,
catch_exceptions=()):
"""Executes a method until the retry limit is hit or True is returned."""
return retry_until_valid_or_limit_reached(
method, limit, lambda x: x, sleep_s, catch_exceptions) | [
"def",
"retry_until_true_or_limit_reached",
"(",
"method",
",",
"limit",
",",
"sleep_s",
"=",
"1",
",",
"catch_exceptions",
"=",
"(",
")",
")",
":",
"return",
"retry_until_valid_or_limit_reached",
"(",
"method",
",",
"limit",
",",
"lambda",
"x",
":",
"x",
",",... | Executes a method until the retry limit is hit or True is returned. | [
"Executes",
"a",
"method",
"until",
"the",
"retry",
"limit",
"is",
"hit",
"or",
"True",
"is",
"returned",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L347-L351 | train | 221,882 |
google/openhtf | openhtf/util/timeouts.py | retry_until_not_none_or_limit_reached | def retry_until_not_none_or_limit_reached(method, limit, sleep_s=1,
catch_exceptions=()):
"""Executes a method until the retry limit is hit or not None is returned."""
return retry_until_valid_or_limit_reached(
method, limit, lambda x: x is not None, sleep_s, catch_exceptions) | python | def retry_until_not_none_or_limit_reached(method, limit, sleep_s=1,
catch_exceptions=()):
"""Executes a method until the retry limit is hit or not None is returned."""
return retry_until_valid_or_limit_reached(
method, limit, lambda x: x is not None, sleep_s, catch_exceptions) | [
"def",
"retry_until_not_none_or_limit_reached",
"(",
"method",
",",
"limit",
",",
"sleep_s",
"=",
"1",
",",
"catch_exceptions",
"=",
"(",
")",
")",
":",
"return",
"retry_until_valid_or_limit_reached",
"(",
"method",
",",
"limit",
",",
"lambda",
"x",
":",
"x",
... | Executes a method until the retry limit is hit or not None is returned. | [
"Executes",
"a",
"method",
"until",
"the",
"retry",
"limit",
"is",
"hit",
"or",
"not",
"None",
"is",
"returned",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L354-L358 | train | 221,883 |
google/openhtf | openhtf/util/timeouts.py | retry_until_valid_or_limit_reached | def retry_until_valid_or_limit_reached(method, limit, validation_fn, sleep_s=1,
catch_exceptions=()):
"""Executes a method until the retry limit or validation_fn returns True.
The method is always called once so the effective lower limit for 'limit' is
1. Passing in a number less than 1 will still result it the method being
called once.
Args:
method: The method to execute should take no arguments.
limit: The number of times to try this method. Must be >0.
validation_fn: The validation function called on the function result to
determine whether to keep looping.
sleep_s: The time to sleep in between invocations.
catch_exceptions: Tuple of exception types to catch and count as failures.
Returns:
Whatever the method last returned, implicit False would indicate the
method never succeeded.
"""
assert limit > 0, 'Limit must be greater than 0'
def _execute_method(helper):
try:
return method()
except catch_exceptions:
if not helper.remaining:
raise
return None
helper = RetryHelper(limit - 1)
result = _execute_method(helper)
while not validation_fn(result) and helper.retry_if_possible():
time.sleep(sleep_s)
result = _execute_method(helper)
return result | python | def retry_until_valid_or_limit_reached(method, limit, validation_fn, sleep_s=1,
catch_exceptions=()):
"""Executes a method until the retry limit or validation_fn returns True.
The method is always called once so the effective lower limit for 'limit' is
1. Passing in a number less than 1 will still result it the method being
called once.
Args:
method: The method to execute should take no arguments.
limit: The number of times to try this method. Must be >0.
validation_fn: The validation function called on the function result to
determine whether to keep looping.
sleep_s: The time to sleep in between invocations.
catch_exceptions: Tuple of exception types to catch and count as failures.
Returns:
Whatever the method last returned, implicit False would indicate the
method never succeeded.
"""
assert limit > 0, 'Limit must be greater than 0'
def _execute_method(helper):
try:
return method()
except catch_exceptions:
if not helper.remaining:
raise
return None
helper = RetryHelper(limit - 1)
result = _execute_method(helper)
while not validation_fn(result) and helper.retry_if_possible():
time.sleep(sleep_s)
result = _execute_method(helper)
return result | [
"def",
"retry_until_valid_or_limit_reached",
"(",
"method",
",",
"limit",
",",
"validation_fn",
",",
"sleep_s",
"=",
"1",
",",
"catch_exceptions",
"=",
"(",
")",
")",
":",
"assert",
"limit",
">",
"0",
",",
"'Limit must be greater than 0'",
"def",
"_execute_method"... | Executes a method until the retry limit or validation_fn returns True.
The method is always called once so the effective lower limit for 'limit' is
1. Passing in a number less than 1 will still result it the method being
called once.
Args:
method: The method to execute should take no arguments.
limit: The number of times to try this method. Must be >0.
validation_fn: The validation function called on the function result to
determine whether to keep looping.
sleep_s: The time to sleep in between invocations.
catch_exceptions: Tuple of exception types to catch and count as failures.
Returns:
Whatever the method last returned, implicit False would indicate the
method never succeeded. | [
"Executes",
"a",
"method",
"until",
"the",
"retry",
"limit",
"or",
"validation_fn",
"returns",
"True",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L361-L395 | train | 221,884 |
google/openhtf | openhtf/util/timeouts.py | take_at_least_n_seconds | def take_at_least_n_seconds(time_s):
"""A context manager which ensures it takes at least time_s to execute.
Example:
with take_at_least_n_seconds(5):
do.Something()
do.SomethingElse()
# if Something and SomethingElse took 3 seconds, the with block with sleep
# for 2 seconds before exiting.
Args:
time_s: The number of seconds this block should take. If it doesn't take at
least this time, then this method blocks during __exit__.
Yields:
To do some actions then on completion waits the remaining time.
"""
timeout = PolledTimeout(time_s)
yield
while not timeout.has_expired():
time.sleep(timeout.remaining) | python | def take_at_least_n_seconds(time_s):
"""A context manager which ensures it takes at least time_s to execute.
Example:
with take_at_least_n_seconds(5):
do.Something()
do.SomethingElse()
# if Something and SomethingElse took 3 seconds, the with block with sleep
# for 2 seconds before exiting.
Args:
time_s: The number of seconds this block should take. If it doesn't take at
least this time, then this method blocks during __exit__.
Yields:
To do some actions then on completion waits the remaining time.
"""
timeout = PolledTimeout(time_s)
yield
while not timeout.has_expired():
time.sleep(timeout.remaining) | [
"def",
"take_at_least_n_seconds",
"(",
"time_s",
")",
":",
"timeout",
"=",
"PolledTimeout",
"(",
"time_s",
")",
"yield",
"while",
"not",
"timeout",
".",
"has_expired",
"(",
")",
":",
"time",
".",
"sleep",
"(",
"timeout",
".",
"remaining",
")"
] | A context manager which ensures it takes at least time_s to execute.
Example:
with take_at_least_n_seconds(5):
do.Something()
do.SomethingElse()
# if Something and SomethingElse took 3 seconds, the with block with sleep
# for 2 seconds before exiting.
Args:
time_s: The number of seconds this block should take. If it doesn't take at
least this time, then this method blocks during __exit__.
Yields:
To do some actions then on completion waits the remaining time. | [
"A",
"context",
"manager",
"which",
"ensures",
"it",
"takes",
"at",
"least",
"time_s",
"to",
"execute",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L401-L419 | train | 221,885 |
google/openhtf | openhtf/util/timeouts.py | take_at_most_n_seconds | def take_at_most_n_seconds(time_s, func, *args, **kwargs):
"""A function that returns whether a function call took less than time_s.
NOTE: The function call is not killed and will run indefinitely if hung.
Args:
time_s: Maximum amount of time to take.
func: Function to call.
*args: Arguments to call the function with.
**kwargs: Keyword arguments to call the function with.
Returns:
True if the function finished in less than time_s seconds.
"""
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
thread.start()
thread.join(time_s)
if thread.is_alive():
return False
return True | python | def take_at_most_n_seconds(time_s, func, *args, **kwargs):
"""A function that returns whether a function call took less than time_s.
NOTE: The function call is not killed and will run indefinitely if hung.
Args:
time_s: Maximum amount of time to take.
func: Function to call.
*args: Arguments to call the function with.
**kwargs: Keyword arguments to call the function with.
Returns:
True if the function finished in less than time_s seconds.
"""
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
thread.start()
thread.join(time_s)
if thread.is_alive():
return False
return True | [
"def",
"take_at_most_n_seconds",
"(",
"time_s",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"func",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")",
"thr... | A function that returns whether a function call took less than time_s.
NOTE: The function call is not killed and will run indefinitely if hung.
Args:
time_s: Maximum amount of time to take.
func: Function to call.
*args: Arguments to call the function with.
**kwargs: Keyword arguments to call the function with.
Returns:
True if the function finished in less than time_s seconds. | [
"A",
"function",
"that",
"returns",
"whether",
"a",
"function",
"call",
"took",
"less",
"than",
"time_s",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L422-L440 | train | 221,886 |
google/openhtf | openhtf/util/timeouts.py | execute_after_delay | def execute_after_delay(time_s, func, *args, **kwargs):
"""A function that executes the given function after a delay.
Executes func in a separate thread after a delay, so that this function
returns immediately. Note that any exceptions raised by func will be
ignored (but logged). Also, if time_s is a PolledTimeout with no expiration,
then this method simply returns immediately and does nothing.
Args:
time_s: Delay in seconds to wait before executing func, may be a
PolledTimeout object.
func: Function to call.
*args: Arguments to call the function with.
**kwargs: Keyword arguments to call the function with.
"""
timeout = PolledTimeout.from_seconds(time_s)
def target():
time.sleep(timeout.remaining)
try:
func(*args, **kwargs)
except Exception: # pylint: disable=broad-except
_LOG.exception('Error executing %s after %s expires.', func, timeout)
if timeout.remaining is not None:
thread = threading.Thread(target=target)
thread.start() | python | def execute_after_delay(time_s, func, *args, **kwargs):
"""A function that executes the given function after a delay.
Executes func in a separate thread after a delay, so that this function
returns immediately. Note that any exceptions raised by func will be
ignored (but logged). Also, if time_s is a PolledTimeout with no expiration,
then this method simply returns immediately and does nothing.
Args:
time_s: Delay in seconds to wait before executing func, may be a
PolledTimeout object.
func: Function to call.
*args: Arguments to call the function with.
**kwargs: Keyword arguments to call the function with.
"""
timeout = PolledTimeout.from_seconds(time_s)
def target():
time.sleep(timeout.remaining)
try:
func(*args, **kwargs)
except Exception: # pylint: disable=broad-except
_LOG.exception('Error executing %s after %s expires.', func, timeout)
if timeout.remaining is not None:
thread = threading.Thread(target=target)
thread.start() | [
"def",
"execute_after_delay",
"(",
"time_s",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"timeout",
"=",
"PolledTimeout",
".",
"from_seconds",
"(",
"time_s",
")",
"def",
"target",
"(",
")",
":",
"time",
".",
"sleep",
"(",
"timeout... | A function that executes the given function after a delay.
Executes func in a separate thread after a delay, so that this function
returns immediately. Note that any exceptions raised by func will be
ignored (but logged). Also, if time_s is a PolledTimeout with no expiration,
then this method simply returns immediately and does nothing.
Args:
time_s: Delay in seconds to wait before executing func, may be a
PolledTimeout object.
func: Function to call.
*args: Arguments to call the function with.
**kwargs: Keyword arguments to call the function with. | [
"A",
"function",
"that",
"executes",
"the",
"given",
"function",
"after",
"a",
"delay",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L443-L468 | train | 221,887 |
google/openhtf | openhtf/util/timeouts.py | PolledTimeout.from_millis | def from_millis(cls, timeout_ms):
"""Create a new PolledTimeout if needed.
If timeout_ms is already a PolledTimeout, just return it, otherwise create a
new PolledTimeout with the given timeout in milliseconds.
Args:
timeout_ms: PolledTimeout object, or number of milliseconds to use for
creating a new one.
Returns:
A PolledTimeout object that will expire in timeout_ms milliseconds, which
may be timeout_ms itself, or a newly allocated PolledTimeout.
"""
if hasattr(timeout_ms, 'has_expired'):
return timeout_ms
if timeout_ms is None:
return cls(None)
return cls(timeout_ms / 1000.0) | python | def from_millis(cls, timeout_ms):
"""Create a new PolledTimeout if needed.
If timeout_ms is already a PolledTimeout, just return it, otherwise create a
new PolledTimeout with the given timeout in milliseconds.
Args:
timeout_ms: PolledTimeout object, or number of milliseconds to use for
creating a new one.
Returns:
A PolledTimeout object that will expire in timeout_ms milliseconds, which
may be timeout_ms itself, or a newly allocated PolledTimeout.
"""
if hasattr(timeout_ms, 'has_expired'):
return timeout_ms
if timeout_ms is None:
return cls(None)
return cls(timeout_ms / 1000.0) | [
"def",
"from_millis",
"(",
"cls",
",",
"timeout_ms",
")",
":",
"if",
"hasattr",
"(",
"timeout_ms",
",",
"'has_expired'",
")",
":",
"return",
"timeout_ms",
"if",
"timeout_ms",
"is",
"None",
":",
"return",
"cls",
"(",
"None",
")",
"return",
"cls",
"(",
"ti... | Create a new PolledTimeout if needed.
If timeout_ms is already a PolledTimeout, just return it, otherwise create a
new PolledTimeout with the given timeout in milliseconds.
Args:
timeout_ms: PolledTimeout object, or number of milliseconds to use for
creating a new one.
Returns:
A PolledTimeout object that will expire in timeout_ms milliseconds, which
may be timeout_ms itself, or a newly allocated PolledTimeout. | [
"Create",
"a",
"new",
"PolledTimeout",
"if",
"needed",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L41-L59 | train | 221,888 |
google/openhtf | openhtf/util/timeouts.py | Interval.start | def start(self, interval_s):
"""Starts executing the method at the specified interval.
Args:
interval_s: The amount of time between executions of the method.
Returns:
False if the interval was already running.
"""
if self.running:
return False
self.stopped.clear()
def _execute():
# Always execute immediately once
if not self.method() and self.stop_if_false:
return
while not self.stopped.wait(interval_s):
if not self.method() and self.stop_if_false:
return
self.thread = threading.Thread(target=_execute)
self.thread.daemon = True
self.thread.start()
return True | python | def start(self, interval_s):
"""Starts executing the method at the specified interval.
Args:
interval_s: The amount of time between executions of the method.
Returns:
False if the interval was already running.
"""
if self.running:
return False
self.stopped.clear()
def _execute():
# Always execute immediately once
if not self.method() and self.stop_if_false:
return
while not self.stopped.wait(interval_s):
if not self.method() and self.stop_if_false:
return
self.thread = threading.Thread(target=_execute)
self.thread.daemon = True
self.thread.start()
return True | [
"def",
"start",
"(",
"self",
",",
"interval_s",
")",
":",
"if",
"self",
".",
"running",
":",
"return",
"False",
"self",
".",
"stopped",
".",
"clear",
"(",
")",
"def",
"_execute",
"(",
")",
":",
"# Always execute immediately once",
"if",
"not",
"self",
".... | Starts executing the method at the specified interval.
Args:
interval_s: The amount of time between executions of the method.
Returns:
False if the interval was already running. | [
"Starts",
"executing",
"the",
"method",
"at",
"the",
"specified",
"interval",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L257-L281 | train | 221,889 |
google/openhtf | openhtf/util/timeouts.py | Interval.stop | def stop(self, timeout_s=None):
"""Stops the interval.
If a timeout is provided and stop returns False then the thread is
effectively abandoned in whatever state it was in (presumably dead-locked).
Args:
timeout_s: The time in seconds to wait on the thread to finish. By
default it's forever.
Returns:
False if a timeout was provided and we timed out.
"""
self.stopped.set()
if self.thread:
self.thread.join(timeout_s)
return not self.thread.isAlive()
else:
return True | python | def stop(self, timeout_s=None):
"""Stops the interval.
If a timeout is provided and stop returns False then the thread is
effectively abandoned in whatever state it was in (presumably dead-locked).
Args:
timeout_s: The time in seconds to wait on the thread to finish. By
default it's forever.
Returns:
False if a timeout was provided and we timed out.
"""
self.stopped.set()
if self.thread:
self.thread.join(timeout_s)
return not self.thread.isAlive()
else:
return True | [
"def",
"stop",
"(",
"self",
",",
"timeout_s",
"=",
"None",
")",
":",
"self",
".",
"stopped",
".",
"set",
"(",
")",
"if",
"self",
".",
"thread",
":",
"self",
".",
"thread",
".",
"join",
"(",
"timeout_s",
")",
"return",
"not",
"self",
".",
"thread",
... | Stops the interval.
If a timeout is provided and stop returns False then the thread is
effectively abandoned in whatever state it was in (presumably dead-locked).
Args:
timeout_s: The time in seconds to wait on the thread to finish. By
default it's forever.
Returns:
False if a timeout was provided and we timed out. | [
"Stops",
"the",
"interval",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L283-L300 | train | 221,890 |
google/openhtf | openhtf/util/timeouts.py | Interval.join | def join(self, timeout_s=None):
"""Joins blocking until the interval ends or until timeout is reached.
Args:
timeout_s: The time in seconds to wait, defaults to forever.
Returns:
True if the interval is still running and we reached the timeout.
"""
if not self.thread:
return False
self.thread.join(timeout_s)
return self.running | python | def join(self, timeout_s=None):
"""Joins blocking until the interval ends or until timeout is reached.
Args:
timeout_s: The time in seconds to wait, defaults to forever.
Returns:
True if the interval is still running and we reached the timeout.
"""
if not self.thread:
return False
self.thread.join(timeout_s)
return self.running | [
"def",
"join",
"(",
"self",
",",
"timeout_s",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"thread",
":",
"return",
"False",
"self",
".",
"thread",
".",
"join",
"(",
"timeout_s",
")",
"return",
"self",
".",
"running"
] | Joins blocking until the interval ends or until timeout is reached.
Args:
timeout_s: The time in seconds to wait, defaults to forever.
Returns:
True if the interval is still running and we reached the timeout. | [
"Joins",
"blocking",
"until",
"the",
"interval",
"ends",
"or",
"until",
"timeout",
"is",
"reached",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L302-L313 | train | 221,891 |
google/openhtf | pylint_plugins/conf_plugin.py | transform_declare | def transform_declare(node):
"""Transform conf.declare calls by stashing the declared names."""
global CURRENT_ROOT
if not (isinstance(node.func, astroid.Attribute)
and isinstance(node.func.expr, astroid.Name)
and node.func.expr.name == 'conf'
and node.func.attrname == 'declare'):
return
conf_key_name = None
if node.args:
conf_key_name = node.args[0].value
else:
for keyword in node.keywords:
if keyword.arg == 'name':
# Assume the name is an astroid.Const(str), so it has a str value.
conf_key_name = keyword.value.value
break
assert conf_key_name != None, "Invalid conf.declare() syntax"
if CONF_NODE:
# Keep track of the current root, refreshing the locals if it changes.
if not CURRENT_ROOT or CURRENT_ROOT != node.root():
CURRENT_ROOT = node.root()
CONF_NODE.locals = CONF_LOCALS
CONF_NODE.locals[conf_key_name] = [None]
else:
CONF_LOCALS[conf_key_name] = [None] | python | def transform_declare(node):
"""Transform conf.declare calls by stashing the declared names."""
global CURRENT_ROOT
if not (isinstance(node.func, astroid.Attribute)
and isinstance(node.func.expr, astroid.Name)
and node.func.expr.name == 'conf'
and node.func.attrname == 'declare'):
return
conf_key_name = None
if node.args:
conf_key_name = node.args[0].value
else:
for keyword in node.keywords:
if keyword.arg == 'name':
# Assume the name is an astroid.Const(str), so it has a str value.
conf_key_name = keyword.value.value
break
assert conf_key_name != None, "Invalid conf.declare() syntax"
if CONF_NODE:
# Keep track of the current root, refreshing the locals if it changes.
if not CURRENT_ROOT or CURRENT_ROOT != node.root():
CURRENT_ROOT = node.root()
CONF_NODE.locals = CONF_LOCALS
CONF_NODE.locals[conf_key_name] = [None]
else:
CONF_LOCALS[conf_key_name] = [None] | [
"def",
"transform_declare",
"(",
"node",
")",
":",
"global",
"CURRENT_ROOT",
"if",
"not",
"(",
"isinstance",
"(",
"node",
".",
"func",
",",
"astroid",
".",
"Attribute",
")",
"and",
"isinstance",
"(",
"node",
".",
"func",
".",
"expr",
",",
"astroid",
".",... | Transform conf.declare calls by stashing the declared names. | [
"Transform",
"conf",
".",
"declare",
"calls",
"by",
"stashing",
"the",
"declared",
"names",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/pylint_plugins/conf_plugin.py#L30-L59 | train | 221,892 |
google/openhtf | pylint_plugins/conf_plugin.py | transform_conf_module | def transform_conf_module(cls):
"""Transform usages of the conf module by updating locals."""
global CONF_NODE
if cls.name == 'openhtf.conf':
# Put all the attributes in Configuration into the openhtf.conf node.
cls._locals.update(cls.locals['Configuration'][0].locals)
# Store reference to this node for future use.
CONF_NODE = cls
CONF_LOCALS.update(cls.locals) | python | def transform_conf_module(cls):
"""Transform usages of the conf module by updating locals."""
global CONF_NODE
if cls.name == 'openhtf.conf':
# Put all the attributes in Configuration into the openhtf.conf node.
cls._locals.update(cls.locals['Configuration'][0].locals)
# Store reference to this node for future use.
CONF_NODE = cls
CONF_LOCALS.update(cls.locals) | [
"def",
"transform_conf_module",
"(",
"cls",
")",
":",
"global",
"CONF_NODE",
"if",
"cls",
".",
"name",
"==",
"'openhtf.conf'",
":",
"# Put all the attributes in Configuration into the openhtf.conf node.",
"cls",
".",
"_locals",
".",
"update",
"(",
"cls",
".",
"locals"... | Transform usages of the conf module by updating locals. | [
"Transform",
"usages",
"of",
"the",
"conf",
"module",
"by",
"updating",
"locals",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/pylint_plugins/conf_plugin.py#L62-L72 | train | 221,893 |
google/openhtf | pylint_plugins/conf_plugin.py | register | def register(linter):
"""Register all transforms with the linter."""
MANAGER.register_transform(astroid.Call, transform_declare)
MANAGER.register_transform(astroid.Module, transform_conf_module) | python | def register(linter):
"""Register all transforms with the linter."""
MANAGER.register_transform(astroid.Call, transform_declare)
MANAGER.register_transform(astroid.Module, transform_conf_module) | [
"def",
"register",
"(",
"linter",
")",
":",
"MANAGER",
".",
"register_transform",
"(",
"astroid",
".",
"Call",
",",
"transform_declare",
")",
"MANAGER",
".",
"register_transform",
"(",
"astroid",
".",
"Module",
",",
"transform_conf_module",
")"
] | Register all transforms with the linter. | [
"Register",
"all",
"transforms",
"with",
"the",
"linter",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/pylint_plugins/conf_plugin.py#L75-L78 | train | 221,894 |
google/openhtf | openhtf/plugs/user_input.py | ConsolePrompt.run | def run(self):
"""Main logic for this thread to execute."""
if platform.system() == 'Windows':
# Windows doesn't support file-like objects for select(), so fall back
# to raw_input().
response = input(''.join((self._message,
os.linesep,
PROMPT)))
self._answered = True
self._callback(response)
return
# First, display the prompt to the console.
console_output.cli_print(self._message, color=self._color,
end=os.linesep, logger=None)
console_output.cli_print(PROMPT, color=self._color, end='', logger=None)
sys.stdout.flush()
# Before reading, clear any lingering buffered terminal input.
termios.tcflush(sys.stdin, termios.TCIFLUSH)
line = ''
while not self._stop_event.is_set():
inputs, _, _ = select.select([sys.stdin], [], [], 0.001)
if sys.stdin in inputs:
new = os.read(sys.stdin.fileno(), 1024)
if not new:
# Hit EOF!
# They hit ^D (to insert EOF). Tell them to hit ^C if they
# want to actually quit.
print('Hit ^C (Ctrl+c) to exit.')
break
line += new.decode('utf-8')
if '\n' in line:
response = line[:line.find('\n')]
self._answered = True
self._callback(response)
return | python | def run(self):
"""Main logic for this thread to execute."""
if platform.system() == 'Windows':
# Windows doesn't support file-like objects for select(), so fall back
# to raw_input().
response = input(''.join((self._message,
os.linesep,
PROMPT)))
self._answered = True
self._callback(response)
return
# First, display the prompt to the console.
console_output.cli_print(self._message, color=self._color,
end=os.linesep, logger=None)
console_output.cli_print(PROMPT, color=self._color, end='', logger=None)
sys.stdout.flush()
# Before reading, clear any lingering buffered terminal input.
termios.tcflush(sys.stdin, termios.TCIFLUSH)
line = ''
while not self._stop_event.is_set():
inputs, _, _ = select.select([sys.stdin], [], [], 0.001)
if sys.stdin in inputs:
new = os.read(sys.stdin.fileno(), 1024)
if not new:
# Hit EOF!
# They hit ^D (to insert EOF). Tell them to hit ^C if they
# want to actually quit.
print('Hit ^C (Ctrl+c) to exit.')
break
line += new.decode('utf-8')
if '\n' in line:
response = line[:line.find('\n')]
self._answered = True
self._callback(response)
return | [
"def",
"run",
"(",
"self",
")",
":",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"'Windows'",
":",
"# Windows doesn't support file-like objects for select(), so fall back",
"# to raw_input().",
"response",
"=",
"input",
"(",
"''",
".",
"join",
"(",
"(",
"self... | Main logic for this thread to execute. | [
"Main",
"logic",
"for",
"this",
"thread",
"to",
"execute",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/user_input.py#L90-L127 | train | 221,895 |
google/openhtf | openhtf/plugs/user_input.py | UserInput._asdict | def _asdict(self):
"""Return a dictionary representation of the current prompt."""
with self._cond:
if self._prompt is None:
return
return {'id': self._prompt.id,
'message': self._prompt.message,
'text-input': self._prompt.text_input} | python | def _asdict(self):
"""Return a dictionary representation of the current prompt."""
with self._cond:
if self._prompt is None:
return
return {'id': self._prompt.id,
'message': self._prompt.message,
'text-input': self._prompt.text_input} | [
"def",
"_asdict",
"(",
"self",
")",
":",
"with",
"self",
".",
"_cond",
":",
"if",
"self",
".",
"_prompt",
"is",
"None",
":",
"return",
"return",
"{",
"'id'",
":",
"self",
".",
"_prompt",
".",
"id",
",",
"'message'",
":",
"self",
".",
"_prompt",
"."... | Return a dictionary representation of the current prompt. | [
"Return",
"a",
"dictionary",
"representation",
"of",
"the",
"current",
"prompt",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/user_input.py#L146-L153 | train | 221,896 |
google/openhtf | openhtf/plugs/user_input.py | UserInput.remove_prompt | def remove_prompt(self):
"""Remove the prompt."""
with self._cond:
self._prompt = None
if self._console_prompt:
self._console_prompt.Stop()
self._console_prompt = None
self.notify_update() | python | def remove_prompt(self):
"""Remove the prompt."""
with self._cond:
self._prompt = None
if self._console_prompt:
self._console_prompt.Stop()
self._console_prompt = None
self.notify_update() | [
"def",
"remove_prompt",
"(",
"self",
")",
":",
"with",
"self",
".",
"_cond",
":",
"self",
".",
"_prompt",
"=",
"None",
"if",
"self",
".",
"_console_prompt",
":",
"self",
".",
"_console_prompt",
".",
"Stop",
"(",
")",
"self",
".",
"_console_prompt",
"=",
... | Remove the prompt. | [
"Remove",
"the",
"prompt",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/user_input.py#L158-L165 | train | 221,897 |
google/openhtf | openhtf/plugs/user_input.py | UserInput.prompt | def prompt(self, message, text_input=False, timeout_s=None, cli_color=''):
"""Display a prompt and wait for a response.
Args:
message: A string to be presented to the user.
text_input: A boolean indicating whether the user must respond with text.
timeout_s: Seconds to wait before raising a PromptUnansweredError.
cli_color: An ANSI color code, or the empty string.
Returns:
A string response, or the empty string if text_input was False.
Raises:
MultiplePromptsError: There was already an existing prompt.
PromptUnansweredError: Timed out waiting for the user to respond.
"""
self.start_prompt(message, text_input, cli_color)
return self.wait_for_prompt(timeout_s) | python | def prompt(self, message, text_input=False, timeout_s=None, cli_color=''):
"""Display a prompt and wait for a response.
Args:
message: A string to be presented to the user.
text_input: A boolean indicating whether the user must respond with text.
timeout_s: Seconds to wait before raising a PromptUnansweredError.
cli_color: An ANSI color code, or the empty string.
Returns:
A string response, or the empty string if text_input was False.
Raises:
MultiplePromptsError: There was already an existing prompt.
PromptUnansweredError: Timed out waiting for the user to respond.
"""
self.start_prompt(message, text_input, cli_color)
return self.wait_for_prompt(timeout_s) | [
"def",
"prompt",
"(",
"self",
",",
"message",
",",
"text_input",
"=",
"False",
",",
"timeout_s",
"=",
"None",
",",
"cli_color",
"=",
"''",
")",
":",
"self",
".",
"start_prompt",
"(",
"message",
",",
"text_input",
",",
"cli_color",
")",
"return",
"self",
... | Display a prompt and wait for a response.
Args:
message: A string to be presented to the user.
text_input: A boolean indicating whether the user must respond with text.
timeout_s: Seconds to wait before raising a PromptUnansweredError.
cli_color: An ANSI color code, or the empty string.
Returns:
A string response, or the empty string if text_input was False.
Raises:
MultiplePromptsError: There was already an existing prompt.
PromptUnansweredError: Timed out waiting for the user to respond. | [
"Display",
"a",
"prompt",
"and",
"wait",
"for",
"a",
"response",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/user_input.py#L167-L184 | train | 221,898 |
google/openhtf | openhtf/plugs/user_input.py | UserInput.start_prompt | def start_prompt(self, message, text_input=False, cli_color=''):
"""Display a prompt.
Args:
message: A string to be presented to the user.
text_input: A boolean indicating whether the user must respond with text.
cli_color: An ANSI color code, or the empty string.
Raises:
MultiplePromptsError: There was already an existing prompt.
Returns:
A string uniquely identifying the prompt.
"""
with self._cond:
if self._prompt:
raise MultiplePromptsError
prompt_id = uuid.uuid4().hex
_LOG.debug('Displaying prompt (%s): "%s"%s', prompt_id, message,
', Expects text input.' if text_input else '')
self._response = None
self._prompt = Prompt(
id=prompt_id, message=message, text_input=text_input)
if sys.stdin.isatty():
self._console_prompt = ConsolePrompt(
message, functools.partial(self.respond, prompt_id), cli_color)
self._console_prompt.start()
self.notify_update()
return prompt_id | python | def start_prompt(self, message, text_input=False, cli_color=''):
"""Display a prompt.
Args:
message: A string to be presented to the user.
text_input: A boolean indicating whether the user must respond with text.
cli_color: An ANSI color code, or the empty string.
Raises:
MultiplePromptsError: There was already an existing prompt.
Returns:
A string uniquely identifying the prompt.
"""
with self._cond:
if self._prompt:
raise MultiplePromptsError
prompt_id = uuid.uuid4().hex
_LOG.debug('Displaying prompt (%s): "%s"%s', prompt_id, message,
', Expects text input.' if text_input else '')
self._response = None
self._prompt = Prompt(
id=prompt_id, message=message, text_input=text_input)
if sys.stdin.isatty():
self._console_prompt = ConsolePrompt(
message, functools.partial(self.respond, prompt_id), cli_color)
self._console_prompt.start()
self.notify_update()
return prompt_id | [
"def",
"start_prompt",
"(",
"self",
",",
"message",
",",
"text_input",
"=",
"False",
",",
"cli_color",
"=",
"''",
")",
":",
"with",
"self",
".",
"_cond",
":",
"if",
"self",
".",
"_prompt",
":",
"raise",
"MultiplePromptsError",
"prompt_id",
"=",
"uuid",
"... | Display a prompt.
Args:
message: A string to be presented to the user.
text_input: A boolean indicating whether the user must respond with text.
cli_color: An ANSI color code, or the empty string.
Raises:
MultiplePromptsError: There was already an existing prompt.
Returns:
A string uniquely identifying the prompt. | [
"Display",
"a",
"prompt",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/user_input.py#L186-L216 | train | 221,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.