docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Create a new ADB stream.
Args:
destination: String identifier for the destination of this stream.
transport: AdbStreamTransport to use for reads/writes.
|
def __init__(self, destination, transport):
  """Create a new ADB stream.

  Args:
    destination: String identifier for the destination of this stream.
    transport: AdbStreamTransport used for this stream's reads/writes.
  """
  self._transport = transport
  self._destination = destination
| 205,198
|
Write data to this stream.
Args:
data: Data to write.
timeout_ms: Timeout to use for the write/Ack transaction, in
milliseconds (or as a PolledTimeout object).
Raises:
AdbProtocolError: If an ACK is not received.
AdbStreamClosedError: If the stream is already closed, or gets closed
before the write completes.
|
def write(self, data, timeout_ms=None):
  """Write data to this stream in maxdata-sized chunks.

  Args:
    data: Data to write.
    timeout_ms: Timeout to use for the write/Ack transaction, in milliseconds
      (or as a PolledTimeout object).

  Raises:
    AdbProtocolError: If an ACK is not received.
    AdbStreamClosedError: If the stream is already closed, or gets closed
      before the write completes.
  """
  timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
  # The transport can only carry maxdata bytes per WRTE message, so peel off
  # one chunk at a time until everything has been sent.
  while data:
    chunk_size = self._transport.adb_connection.maxdata
    self._transport.write(data[:chunk_size], timeout)
    data = data[chunk_size:]
| 205,200
|
Yield data until this stream is closed.
Args:
timeout_ms: Timeout in milliseconds to keep reading (or a PolledTimeout
object).
Yields:
Data read from a single call to self.read(), until the stream is closed
or timeout is reached.
Raises:
AdbTimeoutError: On timeout.
|
def read_until_close(self, timeout_ms=None):
  """Yield data until this stream is closed.

  Args:
    timeout_ms: Timeout in milliseconds to keep reading (or a PolledTimeout
      object).

  Yields:
    Data read from a single call to self.read(), until the stream is closed
    or timeout is reached.

  Raises:
    AdbTimeoutError: On timeout.
  """
  while True:
    try:
      yield self.read(timeout_ms=timeout_ms)
    except usb_exceptions.AdbStreamClosedError:
      # Stream closed; a normal end of iteration rather than an error.
      return
| 205,202
|
Add the given message to this transport's queue.
This method also handles ACKing any WRTE messages.
Args:
message: The AdbMessage to enqueue.
timeout: The timeout to use for the operation. Specifically, WRTE
messages cause an OKAY to be sent; timeout is used for that send.
|
def enqueue_message(self, message, timeout):
  """Add the given message to this transport's queue.

  This method also handles ACKing any WRTE messages.

  Args:
    message: The AdbMessage to enqueue.
    timeout: The timeout to use for the operation. Specifically, WRTE
      messages cause an OKAY to be sent; timeout is used for that send.
  """
  command = message.command
  if command == 'WRTE':
    # WRTE messages must be acknowledged immediately with an OKAY.
    self._send_command('OKAY', timeout=timeout)
  elif command == 'OKAY':
    # An OKAY may be the ack for our OPEN; record/verify the remote id.
    self._set_or_check_remote_id(message.arg0)
  self.message_queue.put(message)
| 205,209
|
Read 'length' bytes from this stream transport.
Args:
length: If not 0, read this many bytes from the stream, otherwise read all
available data (at least one byte).
timeout: timeouts.PolledTimeout to use for this read operation.
Returns:
The bytes read from this stream.
|
def read(self, length, timeout):
  """Read 'length' bytes from this stream transport.

  Args:
    length: If not 0, read this many bytes from the stream, otherwise read
      all available data (at least one byte).
    timeout: timeouts.PolledTimeout to use for this read operation.

  Returns:
    The bytes read from this stream.
  """
  # Block until the reader has buffered enough: any nonempty buffer when
  # length is 0, otherwise at least 'length' bytes.
  self._read_messages_until_true(
      lambda: self._buffer_size and self._buffer_size >= length, timeout)
  with self._read_buffer_lock:
    data, push_back = ''.join(self._read_buffer), ''
    if length:
      # Return exactly 'length' bytes; anything beyond that is pushed back.
      data, push_back = data[:length], data[length:]
    self._read_buffer.clear()
    self._buffer_size = len(push_back)
    if push_back:
      # Excess bytes go back to the front of the buffer for the next read.
      self._read_buffer.appendleft(push_back)
  return data
| 205,211
|
Create an ADB connection to a device.
Args:
transport: AdbTransportAdapter to use for reading/writing AdbMessages
maxdata: Max data size the remote endpoint will accept.
remote_banner: Banner received from the remote endpoint.
|
def __init__(self, transport, maxdata, remote_banner):
  """Create an ADB connection to a device.

  Args:
    transport: AdbTransportAdapter to use for reading/writing AdbMessages.
    maxdata: Max data size the remote endpoint will accept.
    remote_banner: Banner received from the remote endpoint; expected form is
      'systemtype:serial:banner'.

  Raises:
    usb_exceptions.AdbProtocolError: If remote_banner is malformed.
  """
  try:
    systemtype, serial, banner = remote_banner.split(':', 2)
  except ValueError:
    raise usb_exceptions.AdbProtocolError('Received malformed banner %s',
                                          remote_banner)
  self.systemtype = systemtype
  self.serial = serial
  self.banner = banner
  self.transport = transport
  self.maxdata = maxdata
  self._last_id_used = 0
  self._reader_lock = threading.Lock()
  self._open_lock = threading.Lock()
  # Maps local_id to the AdbStreamTransport object for that stream.
  self._stream_transport_map = {}
  self._stream_transport_map_lock = threading.RLock()
| 205,213
|
Instantiate required plugs.
Instantiates plug types and saves the instances in self._plugs_by_type for
use in provide_plugs().
Args:
plug_types: Plug types may be specified here rather than passed
into the constructor (this is used primarily for unit testing
phases).
|
def initialize_plugs(self, plug_types=None):
  """Instantiate required plugs.

  Instantiates plug types and saves the instances in self._plugs_by_type for
  use in provide_plugs().

  Args:
    plug_types: Plug types may be specified here rather than passed
      into the constructor (this is used primarily for unit testing
      phases).

  Raises:
    InvalidPlugError: If a plug type is not a BasePlug subclass, or if it
      tampers with the class-level 'logger' attribute.
  """
  types = plug_types if plug_types is not None else self._plug_types
  for plug_type in types:
    # Create a logger for this plug. All plug loggers go under the 'plug'
    # sub-logger in the logger hierarchy.
    plug_logger = self.logger.getChild(plug_type.__name__)
    if plug_type in self._plugs_by_type:
      # Already instantiated (plugs may be shared between phases); skip it.
      continue
    try:
      if not issubclass(plug_type, BasePlug):
        raise InvalidPlugError(
            'Plug type "%s" is not an instance of BasePlug' % plug_type)
      if plug_type.logger != _LOG:
        # They put a logger attribute on the class itself, overriding ours.
        raise InvalidPlugError(
            'Do not override "logger" in your plugs.', plug_type)
      # Override the logger so that __init__'s logging goes into the record.
      plug_type.logger = plug_logger
      try:
        plug_instance = plug_type()
      finally:
        # Now set it back since we'll give the instance a logger in a moment.
        plug_type.logger = _LOG
      # Set the logger attribute directly (rather than in BasePlug) so we
      # don't depend on subclasses' implementation of __init__ to have it
      # set.
      if plug_instance.logger != _LOG:
        raise InvalidPlugError(
            'Do not set "self.logger" in __init__ in your plugs', plug_type)
      else:
        # Now the instance has its own copy of the test logger.
        plug_instance.logger = plug_logger
    except Exception:  # pylint: disable=broad-except
      # Log, clean up any plugs already created, and re-raise: a failed plug
      # means the whole set is unusable.
      plug_logger.exception('Exception instantiating plug type %s', plug_type)
      self.tear_down_plugs()
      raise
    self.update_plug(plug_type, plug_instance)
| 205,229
|
Wait for a change in the state of a frontend-aware plug.
Args:
plug_name: Plug name, e.g. 'openhtf.plugs.user_input.UserInput'.
remote_state: The last observed state.
timeout_s: Number of seconds to wait for an update.
Returns:
An updated state, or None if the timeout runs out.
Raises:
InvalidPlugError: The plug can't be waited on either because it's not in
use or it's not a frontend-aware plug.
|
def wait_for_plug_update(self, plug_name, remote_state, timeout_s):
  """Wait for a change in the state of a frontend-aware plug.

  Args:
    plug_name: Plug name, e.g. 'openhtf.plugs.user_input.UserInput'.
    remote_state: The last observed state.
    timeout_s: Number of seconds to wait for an update.

  Returns:
    An updated state, or None if the timeout runs out.

  Raises:
    InvalidPlugError: The plug can't be waited on either because it's not in
      use or it's not a frontend-aware plug.
  """
  plug = self._plugs_by_name.get(plug_name)
  if plug is None:
    raise InvalidPlugError('Cannot wait on unknown plug "%s".' % plug_name)
  if not isinstance(plug, FrontendAwareBasePlug):
    raise InvalidPlugError('Cannot wait on a plug %s that is not an subclass '
                           'of FrontendAwareBasePlug.' % plug_name)
  state, update_event = plug.asdict_with_event()
  if state != remote_state:
    # Already diverged from what the frontend last saw; report immediately.
    return state
  # Otherwise block for an update; on timeout we fall through and
  # implicitly return None.
  if update_event.wait(timeout_s):
    return plug._asdict()
| 205,233
|
Wait for any in a list of threading.Event's to be set.
Args:
events: List of threading.Event's.
timeout_s: Max duration in seconds to wait before returning.
Returns:
True if at least one event was set before the timeout expired, else False.
|
def _wait_for_any_event(events, timeout_s):
  """Wait for any in a list of threading.Event's to be set.

  Args:
    events: List of threading.Event's.
    timeout_s: Max duration in seconds to wait before returning.

  Returns:
    True if at least one event was set before the timeout expired, else False.
  """
  def any_event_set():
    return any(event.is_set() for event in events)

  if timeouts.loop_until_timeout_or_true(
      timeout_s, any_event_set, sleep_s=_WAIT_FOR_ANY_EVENT_POLL_S):
    return True
  # Check one last time: an event may have been set between the final poll
  # and the timeout expiring.
  return any_event_set()
| 205,236
|
Save a logging.LogRecord to our test record.
Logs carry useful metadata such as the logger name and level information.
We capture this in a structured format in the test record to enable
filtering by client applications.
Args:
record: A logging.LogRecord to record.
|
def emit(self, record):
  """Save a logging.LogRecord to our test record.

  Logs carry useful metadata such as the logger name and level information;
  this captures it in a structured format in the test record so client
  applications can filter on it.

  Args:
    record: A logging.LogRecord to record.
  """
  try:
    log_record = LogRecord(
        record.levelno, record.name, os.path.basename(record.pathname),
        record.lineno, int(record.created * 1000), self.format(record),
    )
    self._test_record.add_log_record(log_record)
    self._notify_update()
  except Exception:  # pylint: disable=broad-except
    # Defer to the standard logging error handling rather than crashing.
    self.handleError(record)
| 205,283
|
Create an uploader given (parsed) JSON data.
Note that this is a JSON-formatted key file downloaded from Google when
the service account key is created, *NOT* a json-encoded
oauth2client.client.SignedJwtAssertionCredentials object.
Args:
json_data: Dict containing the loaded JSON key data.
Returns:
a MfgInspectorCallback with credentials.
|
def from_json(cls, json_data):
  """Create an uploader given (parsed) JSON key data.

  Note that this is a JSON-formatted key file downloaded from Google when
  the service account key is created, *NOT* a json-encoded
  oauth2client.client.SignedJwtAssertionCredentials object.

  Args:
    json_data: Dict containing the loaded JSON key data.

  Returns:
    A MfgInspectorCallback with credentials.
  """
  user = json_data['client_email']
  keydata = json_data['private_key']
  token_uri = json_data['token_uri']
  return cls(user=user, keydata=keydata, token_uri=token_uri)
| 205,288
|
Block until this command has completed.
Args:
timeout_ms: Timeout, in milliseconds, to wait.
Returns:
Output of the command if it completed and self.stdout is a StringIO
object or was passed in as None. Returns True if the command completed but
stdout was provided (and was not a StringIO object). Returns None if the
timeout expired before the command completed. Be careful to check the
return value explicitly for None, as the output may be ''.
|
def wait(self, timeout_ms=None):
  """Block until this command has completed.

  Args:
    timeout_ms: Timeout, in milliseconds, to wait.

  Returns:
    Output of the command if it completed and self.stdout is a StringIO
    object or was passed in as None. Returns True if the command completed
    but stdout was provided (and was not a StringIO object). Returns None if
    the timeout expired before the command completed. Be careful to check
    the return value explicitly for None, as the output may be ''.
  """
  finished = timeouts.loop_until_timeout_or_true(
      timeouts.PolledTimeout.from_millis(timeout_ms),
      self.stream.is_closed, .1)
  if not finished:
    return None
  # StringIO-like stdout: hand the captured output back to the caller.
  if hasattr(self.stdout, 'getvalue'):
    return self.stdout.getvalue()
  return True
| 205,296
|
Initializes the configuration state.
We have to pull everything we need from global scope into here because we
will be swapping out the module with this instance and will lose any global
references.
Args:
logger: Logger to use for logging messages within this class.
lock: Threading.lock to use for locking access to config values.
**kwargs: Modules we need to access within this class.
|
def __init__(self, logger, lock, parser, **kwargs):
  """Initialize the configuration state.

  We have to pull everything we need from global scope into here because we
  will be swapping out the module with this instance and will lose any global
  references.

  Args:
    logger: Logger to use for logging messages within this class.
    lock: threading.Lock to use for locking access to config values.
    parser: Argument parser used for configuration-related flags.
    **kwargs: Modules we need to access within this class.
  """
  self._logger = logger
  self._lock = lock
  self._modules = kwargs
  self._declarations = {}
  self.ARG_PARSER = parser
  # Parse just the flags we care about, since this happens at import time.
  self._flags, _ = parser.parse_known_args()
  # Populate flag_values from flags now.
  self._flag_values = {}
  self.load_flag_values()
  # Initialize self._loaded_values and load from --config-file if it's set.
  self.reset()
| 205,300
|
Load flag values given from command line flags.
Args:
flags: An argparse Namespace containing the command line flags.
|
def load_flag_values(self, flags=None):
  """Load flag values given from command line flags.

  Args:
    flags: An argparse Namespace containing the command line flags; defaults
      to the flags parsed at construction time.
  """
  if flags is None:
    flags = self._flags
  for keyval in flags.config_value:
    k, v = keyval.split('=', 1)
    # Parse the value as YAML so e.g. 'true'/'3' become bool/int. Use
    # safe_load rather than load: command-line values are untrusted input,
    # and this matches load_from_file()'s use of safe_load.
    v = self._modules['yaml'].safe_load(v) if isinstance(v, str) else v
    # Force any command line keys and values that are bytes to unicode.
    k = k.decode() if isinstance(k, bytes) else k
    v = v.decode() if isinstance(v, bytes) else v
    # setdefault: the first occurrence of a key on the command line wins.
    self._flag_values.setdefault(k, v)
| 205,301
|
Get a config value via item access.
Order of precedence is:
- Value provided via --config-value flag.
- Value loaded via load*() methods.
- Default value as declared with conf.declare()
Args:
item: Config key name to get.
|
def __getitem__(self, item):  # pylint: disable=invalid-name
  """Get a config value via item access.

  Order of precedence is:
    - Value provided via --config-value flag.
    - Value loaded via load*() methods.
    - Default value as declared with conf.declare().

  Args:
    item: Config key name to get.

  Raises:
    UndeclaredKeyError: If the key was never declared.
    UnsetKeyError: If the key has no value and no default.
  """
  if item not in self._declarations:
    raise self.UndeclaredKeyError('Configuration key not declared', item)

  if item in self._flag_values:
    if item in self._loaded_values:
      # A flag wins over a loaded value; warn so the user knows which one
      # actually took effect.
      self._logger.warning(
          'Overriding loaded value for %s (%s) with flag value: %s',
          item, self._loaded_values[item], self._flag_values[item])
    return self._flag_values[item]
  if item in self._loaded_values:
    return self._loaded_values[item]
  if self._declarations[item].has_default:
    return self._declarations[item].default_value

  raise self.UnsetKeyError(
      'Configuration value not set and has no default', item)
| 205,304
|
Declare a configuration key with the given name.
Args:
name: Configuration key to declare, must not have been already declared.
description: If provided, use this as the description for this key.
**kwargs: Other kwargs to pass to the Declaration, only default_value
is currently supported.
|
def declare(self, name, description=None, **kwargs):
  """Declare a configuration key with the given name.

  Args:
    name: Configuration key to declare, must not have been already declared.
    description: If provided, use this as the description for this key.
    **kwargs: Other kwargs to pass to the Declaration, only default_value
      is currently supported.

  Raises:
    InvalidKeyError: If name is not a valid configuration key name.
    KeyAlreadyDeclaredError: If name was previously declared.
  """
  if not self._is_valid_key(name):
    raise self.InvalidKeyError(
        'Invalid key name, must begin with a lowercase letter', name)
  if name in self._declarations:
    raise self.KeyAlreadyDeclaredError(
        'Configuration key already declared', name)
  self._declarations[name] = self.Declaration(
      name, description=description, **kwargs)
| 205,306
|
Loads the configuration from a file.
Parsed contents must be a single dict mapping config key to value.
Args:
yamlfile: The opened file object to load configuration from.
See load_from_dict() for other args' descriptions.
Raises:
ConfigurationInvalidError: If configuration file can't be read, or can't
be parsed as either YAML (or JSON, which is a subset of YAML).
|
def load_from_file(self, yamlfile, _override=True, _allow_undeclared=False):
  """Load the configuration from a file.

  Parsed contents must be a single dict mapping config key to value.

  Args:
    yamlfile: The opened file object to load configuration from.
    See load_from_dict() for other args' descriptions.

  Raises:
    ConfigurationInvalidError: If configuration file can't be read, or can't
      be parsed as either YAML (or JSON, which is a subset of YAML).
  """
  self._logger.info('Loading configuration from file: %s', yamlfile)
  yaml = self._modules['yaml']
  try:
    parsed_yaml = yaml.safe_load(yamlfile.read())
  except yaml.YAMLError:
    self._logger.exception('Problem parsing YAML')
    raise self.ConfigurationInvalidError(
        'Failed to load from %s as YAML' % yamlfile)
  if not isinstance(parsed_yaml, dict):
    # Parsed YAML, but it's not a dict.
    raise self.ConfigurationInvalidError(
        'YAML parsed, but wrong type, should be dict', parsed_yaml)
  self._logger.debug('Configuration loaded from file: %s', parsed_yaml)
  self.load_from_dict(
      parsed_yaml, _override=_override, _allow_undeclared=_allow_undeclared)
| 205,308
|
Fallthrough to underlying FastbootProtocol handler.
Args:
attr: Attribute to get.
Returns:
Either the attribute from the device or a retrying function-wrapper
if attr is a method on the device.
|
def __getattr__(self, attr):  # pylint: disable=invalid-name
  """Fall through to the underlying FastbootProtocol handler.

  Args:
    attr: Attribute to get.

  Returns:
    Either the attribute from the device or a retrying function-wrapper
    if attr is a method on the device.

  Raises:
    usb_exceptions.HandleClosedError: If this handle has been closed.
  """
  if not self._protocol:
    raise usb_exceptions.HandleClosedError()
  val = getattr(self._protocol, attr)
  if not callable(val):
    return val

  def _retry_wrapper(*args, **kwargs):
    # Retry transient USB failures up to self._num_retries times.
    result = _retry_usb_function(self._num_retries, val, *args, **kwargs)
    _LOG.debug('LIBUSB FASTBOOT: %s(*%s, **%s) -> %s',
               attr, args, kwargs, result)
    return result

  return _retry_wrapper
| 205,325
|
Logs the given message every n calls to a logger.
Args:
n: Number of calls before logging.
logger: The logger to which to log.
level: The logging level (e.g. logging.INFO).
message: A message to log
*args: Any format args for the message.
Returns:
A method that logs and returns True every n calls.
|
def _log_every_n_to_logger(n, logger, level, message, *args):  # pylint: disable=invalid-name
  """Logs the given message every n calls to a logger.

  Args:
    n: Number of calls before logging.
    logger: The logger to which to log; defaults to the root logger.
    level: The logging level (e.g. logging.INFO).
    message: A message to log.
    *args: Any format args for the message.

  Returns:
    A method that logs and returns True every n calls.
  """
  logger = logger or logging.getLogger()

  def _gen():  # pylint: disable=missing-docstring
    while True:
      # Swallow n calls quietly, then log once.
      for _ in range(n):
        yield False
      logger.log(level, message, *args)
      yield True

  gen = _gen()
  # six.next is unnecessary here: the builtin next() is available on every
  # supported Python version.
  return lambda: next(gen)
| 205,379
|
Loops until the specified function returns non-None or until a timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
sleep_s: The number of seconds to wait after calling the function.
Returns:
Whatever the function returned last.
|
def loop_until_timeout_or_not_none(timeout_s, function, sleep_s=1):  # pylint: disable=invalid-name
  """Loops until the specified function returns non-None or until a timeout.

  Args:
    timeout_s: The number of seconds to wait until a timeout condition is
      reached. As a convenience, this accepts None to mean never timeout. Can
      also be passed a PolledTimeout object instead of an integer.
    function: The function to call each iteration.
    sleep_s: The number of seconds to wait after calling the function.

  Returns:
    Whatever the function returned last.
  """
  is_not_none = lambda result: result is not None
  return loop_until_timeout_or_valid(timeout_s, function, is_not_none, sleep_s)
| 205,401
|
Executes a method forever at the specified interval.
Args:
method: The callable to execute.
interval_s: The number of seconds to start the execution after each method
finishes.
Returns:
An Interval object.
|
def execute_forever(method, interval_s):  # pylint: disable=invalid-name
  """Executes a method forever at the specified interval.

  Args:
    method: The callable to execute.
    interval_s: The number of seconds to start the execution after each
      method finishes.

  Returns:
    An Interval object (already started).
  """
  repeating = Interval(method)
  repeating.start(interval_s)
  return repeating
| 205,403
|
Executes a method forever until the method returns a false value.
Args:
method: The callable to execute.
interval_s: The number of seconds to start the execution after each method
finishes.
Returns:
An Interval object.
|
def execute_until_false(method, interval_s):  # pylint: disable=invalid-name
  """Executes a method at the specified interval until it returns falsy.

  Args:
    method: The callable to execute.
    interval_s: The number of seconds to start the execution after each
      method finishes.

  Returns:
    An Interval object (already started).
  """
  repeating = Interval(method, stop_if_false=True)
  repeating.start(interval_s)
  return repeating
| 205,404
|
A function that returns whether a function call took less than time_s.
NOTE: The function call is not killed and will run indefinitely if hung.
Args:
time_s: Maximum amount of time to take.
func: Function to call.
*args: Arguments to call the function with.
**kwargs: Keyword arguments to call the function with.
Returns:
True if the function finished in less than time_s seconds.
|
def take_at_most_n_seconds(time_s, func, *args, **kwargs):
  """Return whether a call to func finished within time_s seconds.

  NOTE: The function call is not killed and will run indefinitely if hung.

  Args:
    time_s: Maximum amount of time to take.
    func: Function to call.
    *args: Arguments to call the function with.
    **kwargs: Keyword arguments to call the function with.

  Returns:
    True if the function finished in less than time_s seconds.
  """
  runner = threading.Thread(target=func, args=args, kwargs=kwargs)
  runner.start()
  runner.join(time_s)
  # If the thread is still alive, the call overran its budget.
  return not runner.is_alive()
| 205,409
|
Construct a PolledTimeout object.
Args:
timeout_s: This may either be a number or None. If a number, this object
will consider to be expired after number seconds after construction. If
None, this object never expires.
|
def __init__(self, timeout_s):
  """Construct a PolledTimeout object.

  Args:
    timeout_s: This may either be a number or None. If a number, this object
      will consider to be expired after number seconds after construction.
      If None, this object never expires.
  """
  # Expiration is measured relative to this creation instant.
  self.start = time.time()
  self.timeout_s = timeout_s
| 205,411
|
Create a new PolledTimeout if needed.
If timeout_ms is already a PolledTimeout, just return it, otherwise create a
new PolledTimeout with the given timeout in milliseconds.
Args:
timeout_ms: PolledTimeout object, or number of milliseconds to use for
creating a new one.
Returns:
A PolledTimeout object that will expire in timeout_ms milliseconds, which
may be timeout_ms itself, or a newly allocated PolledTimeout.
|
def from_millis(cls, timeout_ms):
  """Create a new PolledTimeout if needed.

  If timeout_ms is already a PolledTimeout, just return it, otherwise create
  a new PolledTimeout with the given timeout in milliseconds.

  Args:
    timeout_ms: PolledTimeout object, or number of milliseconds to use for
      creating a new one.

  Returns:
    A PolledTimeout object that will expire in timeout_ms milliseconds, which
    may be timeout_ms itself, or a newly allocated PolledTimeout.
  """
  # Duck-typing: anything with has_expired is treated as a PolledTimeout.
  if hasattr(timeout_ms, 'has_expired'):
    return timeout_ms
  # None means 'never expires'; otherwise convert milliseconds to seconds.
  return cls(None) if timeout_ms is None else cls(timeout_ms / 1000.0)
| 205,412
|
Initializes the Interval.
Args:
method: A callable to execute, it should take no arguments.
stop_if_false: If True, the interval will exit if the method returns
False.
|
def __init__(self, method, stop_if_false=False):
  """Initializes the Interval.

  Args:
    method: A callable to execute; it should take no arguments.
    stop_if_false: If True, the interval will exit if the method returns
      False.
  """
  self.method = method
  self.stop_if_false = stop_if_false
  self.stopped = threading.Event()
  self.thread = None
| 205,413
|
Starts executing the method at the specified interval.
Args:
interval_s: The amount of time between executions of the method.
Returns:
False if the interval was already running.
|
def start(self, interval_s):
  """Starts executing the method at the specified interval.

  Args:
    interval_s: The amount of time between executions of the method.

  Returns:
    False if the interval was already running, otherwise True.
  """
  if self.running:
    return False
  self.stopped.clear()

  def _run():
    # Always execute immediately once, then again after each interval until
    # stopped (or the method returns falsy with stop_if_false set).
    if not self.method() and self.stop_if_false:
      return
    while not self.stopped.wait(interval_s):
      if not self.method() and self.stop_if_false:
        return

  self.thread = threading.Thread(target=_run)
  self.thread.daemon = True
  self.thread.start()
  return True
| 205,414
|
Stops the interval.
If a timeout is provided and stop returns False then the thread is
effectively abandoned in whatever state it was in (presumably dead-locked).
Args:
timeout_s: The time in seconds to wait on the thread to finish. By
default it's forever.
Returns:
False if a timeout was provided and we timed out.
|
def stop(self, timeout_s=None):
  """Stops the interval.

  If a timeout is provided and stop returns False then the thread is
  effectively abandoned in whatever state it was in (presumably dead-locked).

  Args:
    timeout_s: The time in seconds to wait on the thread to finish. By
      default it's forever.

  Returns:
    False if a timeout was provided and we timed out.
  """
  self.stopped.set()
  if not self.thread:
    return True
  self.thread.join(timeout_s)
  # Thread.isAlive() was removed in Python 3.9; use the PEP 8 name.
  return not self.thread.is_alive()
| 205,415
|
Joins blocking until the interval ends or until timeout is reached.
Args:
timeout_s: The time in seconds to wait, defaults to forever.
Returns:
True if the interval is still running and we reached the timeout.
|
def join(self, timeout_s=None):
  """Joins blocking until the interval ends or until timeout is reached.

  Args:
    timeout_s: The time in seconds to wait, defaults to forever.

  Returns:
    True if the interval is still running and we reached the timeout.
  """
  if self.thread is None:
    return False
  self.thread.join(timeout_s)
  return self.running
| 205,416
|
Returns an OpenHTF phase for use as a prompt-based start trigger.
Args:
message: The message to display to the user.
timeout_s: Seconds to wait before raising a PromptUnansweredError.
validator: Function used to validate or modify the serial number.
cli_color: An ANSI color code, or the empty string.
|
def prompt_for_test_start(
    message='Enter a DUT ID in order to start the test.', timeout_s=60*60*24,
    validator=lambda sn: sn, cli_color=''):
  """Returns an OpenHTF phase for use as a prompt-based start trigger.

  Args:
    message: The message to display to the user.
    timeout_s: Seconds to wait before raising a PromptUnansweredError.
    validator: Function used to validate or modify the serial number.
    cli_color: An ANSI color code, or the empty string.
  """
  @PhaseOptions(timeout_s=timeout_s)
  @plugs.plug(prompts=UserInput)
  def trigger_phase(test, prompts):
    """Prompt the operator for a DUT ID and record it on the test."""
    response = prompts.prompt(
        message, text_input=True, timeout_s=timeout_s, cli_color=cli_color)
    test.test_record.dut_id = validator(response)

  return trigger_phase
| 205,421
|
Initializes a ConsolePrompt.
Args:
message: A string to be presented to the user.
callback: A function to be called with the response string.
color: An ANSI color code, or the empty string.
|
def __init__(self, message, callback, color=''):
  """Initializes a ConsolePrompt.

  Args:
    message: A string to be presented to the user.
    callback: A function to be called with the response string.
    color: An ANSI color code, or the empty string.
  """
  super(ConsolePrompt, self).__init__()
  # Daemon thread: don't keep the process alive waiting on console input.
  self.daemon = True
  self._message = message
  self._callback = callback
  self._color = color
  self._stop_event = threading.Event()
  self._answered = False
| 205,422
|
Display a prompt.
Args:
message: A string to be presented to the user.
text_input: A boolean indicating whether the user must respond with text.
cli_color: An ANSI color code, or the empty string.
Raises:
MultiplePromptsError: There was already an existing prompt.
Returns:
A string uniquely identifying the prompt.
|
def start_prompt(self, message, text_input=False, cli_color=''):
  """Display a prompt.

  Args:
    message: A string to be presented to the user.
    text_input: A boolean indicating whether the user must respond with text.
    cli_color: An ANSI color code, or the empty string.

  Raises:
    MultiplePromptsError: There was already an existing prompt.

  Returns:
    A string uniquely identifying the prompt.
  """
  with self._cond:
    if self._prompt:
      # Only one prompt may be active at a time.
      raise MultiplePromptsError
    prompt_id = uuid.uuid4().hex
    _LOG.debug('Displaying prompt (%s): "%s"%s', prompt_id, message,
               ', Expects text input.' if text_input else '')
    self._response = None
    self._prompt = Prompt(
        id=prompt_id, message=message, text_input=text_input)
    if sys.stdin.isatty():
      # Only spin up a console prompt when stdin is an interactive terminal.
      self._console_prompt = ConsolePrompt(
          message, functools.partial(self.respond, prompt_id), cli_color)
      self._console_prompt.start()
    self.notify_update()
    return prompt_id
| 205,428
|
Wait for the user to respond to the current prompt.
Args:
timeout_s: Seconds to wait before raising a PromptUnansweredError.
Returns:
A string response, or the empty string if text_input was False.
Raises:
PromptUnansweredError: Timed out waiting for the user to respond.
|
def wait_for_prompt(self, timeout_s=None):
  """Wait for the user to respond to the current prompt.

  Args:
    timeout_s: Seconds to wait before raising a PromptUnansweredError.

  Returns:
    A string response, or the empty string if text_input was False.

  Raises:
    PromptUnansweredError: Timed out waiting for the user to respond.
  """
  with self._cond:
    if self._prompt:
      # A None timeout would block forever; cap it at a year so the wait
      # stays bounded.
      self._cond.wait(timeout_s if timeout_s is not None else 3600 * 24 * 365)
    if self._response is None:
      raise PromptUnansweredError
    return self._response
| 205,429
|
Respond to the prompt with the given ID.
If there is no active prompt or the given ID doesn't match the active
prompt, do nothing.
Args:
prompt_id: A string uniquely identifying the prompt.
response: A string response to the given prompt.
Returns:
True if the prompt with the given ID was active, otherwise False.
|
def respond(self, prompt_id, response):
  """Respond to the prompt with the given ID.

  If there is no active prompt or the given ID doesn't match the active
  prompt, do nothing.

  Args:
    prompt_id: A string uniquely identifying the prompt.
    response: A string response to the given prompt.

  Returns:
    True if the prompt with the given ID was active, otherwise False.
  """
  _LOG.debug('Responding to prompt (%s): "%s"', prompt_id, response)
  with self._cond:
    if not (self._prompt and self._prompt.id == prompt_id):
      return False
    self._response = response
    self.last_response = (prompt_id, response)
    self.remove_prompt()
    # notifyAll() is a deprecated alias; notify_all() is the PEP 8 name.
    self._cond.notify_all()
  return True
| 205,430
|
Executes a phase or skips it, yielding PhaseExecutionOutcome instances.
Args:
phase: Phase to execute.
Returns:
The final PhaseExecutionOutcome that wraps the phase return value
(or exception) of the final phase run. All intermediary results, if any,
are REPEAT and handled internally. Returning REPEAT here means the phase
hit its limit for repetitions.
|
def execute_phase(self, phase):
  """Executes a phase or skips it, yielding PhaseExecutionOutcome instances.

  Args:
    phase: Phase to execute.

  Returns:
    The final PhaseExecutionOutcome that wraps the phase return value
    (or exception) of the final phase run. All intermediary results, if any,
    are REPEAT and handled internally. Returning REPEAT here means the phase
    hit its limit for repetitions.
  """
  repeat_limit = phase.options.repeat_limit or sys.maxsize
  attempt = 1
  while not self._stopping.is_set():
    last_attempt = attempt >= repeat_limit
    outcome = self._execute_phase_once(phase, last_attempt)
    if outcome.is_repeat and not last_attempt:
      attempt += 1
    else:
      return outcome
  # We've been cancelled, so just 'timeout' the phase.
  return PhaseExecutionOutcome(None)
| 205,439
|
Stops execution of the current phase, if any.
It will raise a ThreadTerminationError, which will cause the test to stop
executing and terminate with an ERROR state.
Args:
timeout_s: int or None, timeout in seconds to wait for the phase to stop.
|
def stop(self, timeout_s=None):
  """Stops execution of the current phase, if any.

  It will raise a ThreadTerminationError, which will cause the test to stop
  executing and terminate with an ERROR state.

  Args:
    timeout_s: int or None, timeout in seconds to wait for the phase to stop.
  """
  self._stopping.set()
  # Snapshot the current phase thread under the lock, then operate on the
  # snapshot so we don't hold the lock while waiting.
  with self._current_phase_thread_lock:
    phase_thread = self._current_phase_thread
  if not phase_thread:
    # No phase is currently running; nothing more to do.
    return
  if phase_thread.is_alive():
    # kill() triggers the termination error inside the phase thread.
    phase_thread.kill()
    _LOG.debug('Waiting for cancelled phase to exit: %s', phase_thread)
    timeout = timeouts.PolledTimeout.from_seconds(timeout_s)
    # Poll rather than join so the PolledTimeout semantics are honored.
    while phase_thread.is_alive() and not timeout.has_expired():
      time.sleep(0.1)
    _LOG.debug('Cancelled phase %s exit',
               "didn't" if phase_thread.is_alive() else 'did')
  # Clear the currently running phase, whether it finished or timed out.
  self.test_state.stop_running_phase()
| 205,441
|
Apply only the args that the phase knows.
If the phase has a **kwargs-style argument, it counts as knowing all args.
Args:
phase: phase_descriptor.PhaseDescriptor or PhaseGroup or callable, or
iterable of those, the phase or phase group (or iterable) to apply
with_args to.
**kwargs: arguments to apply to the phase.
Returns:
phase_descriptor.PhaseDescriptor or PhaseGroup or iterable with the updated
args.
|
def optionally_with_args(phase, **kwargs):
  """Apply only the args that the phase knows.

  If the phase has a **kwargs-style argument, it counts as knowing all args.

  Args:
    phase: phase_descriptor.PhaseDescriptor or PhaseGroup or callable, or
      iterable of those, the phase or phase group (or iterable) to apply
      with_args to.
    **kwargs: arguments to apply to the phase.

  Returns:
    phase_descriptor.PhaseDescriptor or PhaseGroup or iterable with the
    updated args.
  """
  # collections.Iterable was removed in Python 3.10; collections.abc has
  # been the canonical location since 3.3. Imported locally so this fix is
  # self-contained.
  from collections.abc import Iterable
  if isinstance(phase, PhaseGroup):
    return phase.with_args(**kwargs)
  if isinstance(phase, Iterable):
    return [optionally_with_args(p, **kwargs) for p in phase]
  if not isinstance(phase, phase_descriptor.PhaseDescriptor):
    phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(phase)
  return phase.with_known_args(**kwargs)
| 205,444
|
Publish messages to subscribers.
Args:
message: The message to publish.
client_filter: A filter function to call passing in each client. Only
clients for whom the function returns True will have the
message sent to them.
|
def publish(cls, message, client_filter=None):
  """Publish messages to subscribers.

  Args:
    message: The message to publish.
    client_filter: A filter function to call passing in each client. Only
      clients for whom the function returns True will have the message sent
      to them.
  """
  with cls._lock:
    for client in cls.subscribers:
      # No filter means everyone gets the message.
      if not client_filter or client_filter(client):
        client.send(message)
| 205,456
|
Return a new PhaseDescriptor from the given function or instance.
We want to return a new copy so that you can reuse a phase with different
options, plugs, measurements, etc.
Args:
func: A phase function or PhaseDescriptor instance.
**options: Options to update on the result.
Raises:
PhaseWrapError: if func is a openhtf.PhaseGroup.
Returns:
A new PhaseDescriptor object.
|
def wrap_or_copy(cls, func, **options):
  """Return a new PhaseDescriptor from the given function or instance.

  We want to return a new copy so that you can reuse a phase with different
  options, plugs, measurements, etc.

  Args:
    func: A phase function or PhaseDescriptor instance.
    **options: Options to update on the result.

  Raises:
    PhaseWrapError: if func is a openhtf.PhaseGroup.

  Returns:
    A new PhaseDescriptor object.
  """
  if isinstance(func, openhtf.PhaseGroup):
    raise PhaseWrapError('Cannot wrap PhaseGroup <%s> as a phase.' % (
        func.name or 'Unnamed'))
  if isinstance(func, cls):
    # Copy so a phase can be reused with different options or kwargs; see
    # with_args() below for more details.
    descriptor = mutablerecords.CopyRecord(func)
  else:
    descriptor = cls(func)
  descriptor.options.update(**options)
  return descriptor
| 205,469
|
Decorator to ensure a handle is open for certain methods.
Subclasses should decorate their Read() and Write() with this rather than
checking their own internal state, keeping all "is this handle open" logic
in is_closed().
Args:
method: A class method on a subclass of UsbHandle
Raises:
HandleClosedError: If this handle has been closed.
Returns:
A wrapper around method that ensures the handle is open before calling through
to the wrapped method.
|
def requires_open_handle(method):  # pylint: disable=invalid-name
  """Decorator to ensure a handle is open for certain methods.

  Subclasses should decorate their Read() and Write() with this rather than
  checking their own internal state, keeping all "is this handle open" logic
  in is_closed().

  Args:
    method: A class method on a subclass of UsbHandle.

  Raises:
    HandleClosedError: If this handle has been closed.

  Returns:
    A wrapper around method that ensures the handle is open before calling
    through to the wrapped method.
  """
  @functools.wraps(method)
  def _checked(self, *args, **kwargs):
    # Refuse to call through once the handle has been closed.
    if self.is_closed():
      raise usb_exceptions.HandleClosedError()
    return method(self, *args, **kwargs)

  return _checked
| 205,475
|
Construct a EtherSync object.
Args:
mac_addr: mac address of the Cambrionix unit for EtherSync.
|
def __init__(self, mac_addr):
  """Construct a EtherSync object.

  Args:
    mac_addr: MAC address of the Cambrionix unit for EtherSync.

  Raises:
    ValueError: If mac_addr has fewer than six colon-separated parts.
  """
  octets = mac_addr.lower().split(':')
  if len(octets) < 6:
    raise ValueError('Invalid mac address')
  # The EtherSync address is the literal prefix 'EtherSync' followed by the
  # last three octets of the MAC address.
  octets[2] = 'EtherSync'
  self._addr = ''.join(octets[2:])
| 205,482
|
Get the device serial number.
Args:
port_num: port number on the Cambrionix unit
Returns:
The USB device serial number.
|
def get_usb_serial(self, port_num):
  """Get the device serial number for the given port.

  Args:
    port_num: Port number on the Cambrionix unit.

  Returns:
    USB device serial number.

  Raises:
    ValueError: If no USB device is detected on the port.
  """
  port = self.port_map[str(port_num)]
  arg = ''.join(['DEVICE INFO,', self._addr, '.', port])
  cmd = ['esuit64', '-t', arg]
  # check_output returns bytes on Python 3; decode so the substring checks
  # and splits below operate on text.
  info = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode(
      'utf-8', 'replace')
  if 'SERIAL' not in info:
    raise ValueError('No USB device detected')
  serial_info = info.split('SERIAL:')[1]
  serial = serial_info.split('\n')[0].strip()
  use_info = info.split('BY')[1].split(' ')[1]
  if use_info == 'NO':
    # The device isn't enabled for use yet; enable it and give the unit a
    # moment to settle.
    cmd = ['esuit64', '-t', 'AUTO USE ALL']
    subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    time.sleep(50.0 / 1000.0)
  return serial
| 205,483
|
Open a USB port on the Cambrionix unit.
Args:
port_num: port number on the Cambrionix unit
Returns:
A USB handle for the device on that port.
|
def open_usb_handle(self, port_num):
  """Open a USB handle for the device on the given port.

  Args:
    port_num: Port number on the Cambrionix unit.

  Returns:
    USB handle for the matching device.
  """
  return local_usb.LibUsbHandle.open(
      serial_number=self.get_usb_serial(port_num))
| 205,484
|
Print the error message to the file in the specified color.
Args:
msg: The error message to be printed.
color: Optional colorama color string to be applied to the message. You can
concatenate colorama color strings together here, but note that style
strings will not be applied.
file: A file object to which the bracketed text will be written. Intended
for use with CLI output file objects, specifically sys.stderr.
|
def error_print(msg, color=colorama.Fore.RED, file=sys.stderr):
  """Print the error message to the file in the specified color.

  Args:
    msg: The error message to be printed.
    color: Optional colorama color string to be applied to the message. You
      can concatenate colorama color strings together here, but note that
      style strings will not be applied.
    file: A file object to which the bracketed text will be written. Intended
      for use with CLI output file objects, specifically sys.stderr.
  """
  if CLI_QUIET:
    # Quiet mode suppresses all CLI output.
    return
  sep = _linesep_for_file(file)
  file.write('{sep}{bright}{color}Error: {normal}{msg}{sep}{reset}'.format(
      sep=sep, bright=colorama.Style.BRIGHT, color=color,
      normal=colorama.Style.NORMAL, msg=msg, reset=colorama.Style.RESET_ALL))
  file.flush()
| 205,491
|
Create a new consumer for a queue.
Parameters:
queue_name(str): The queue to consume.
prefetch(int): The number of messages to prefetch.
timeout(int): The idle timeout in milliseconds.
Returns:
Consumer: A consumer that retrieves messages from Redis.
|
def consume(self, queue_name, prefetch=1, timeout=5000):
  """Create a new consumer for a queue.

  Parameters:
    queue_name(str): The queue to consume.
    prefetch(int): The number of messages to prefetch.
    timeout(int): The idle timeout in milliseconds.

  Returns:
    Consumer: A consumer that retrieves messages from Redis.
  """
  consumer = _RedisConsumer(self, queue_name, prefetch, timeout)
  return consumer
| 205,946
|
Enqueue a message.
Parameters:
message(Message): The message to enqueue.
delay(int): The minimum amount of time, in milliseconds, to
delay the message by. Must be less than 7 days.
Raises:
ValueError: If ``delay`` is longer than 7 days.
|
def enqueue(self, message, *, delay=None):
  """Enqueue a message.

  Parameters:
    message(Message): The message to enqueue.
    delay(int): The minimum amount of time, in milliseconds, to delay the
      message by.

  Returns:
    The enqueued message (a copy with Redis-specific options attached).
  """
  queue_name = message.queue_name
  # Each enqueued message must have a unique id in Redis so
  # using the Message's id isn't safe because messages may be
  # retried.
  message = message.copy(options={
      "redis_message_id": str(uuid4()),
  })
  if delay is not None:
    # Delayed messages are routed to the delay queue with an eta option
    # marking when they become eligible for delivery.
    queue_name = dq_name(queue_name)
    message_eta = current_millis() + delay
    message = message.copy(
        queue_name=queue_name,
        options={
            "eta": message_eta,
        },
    )
  self.logger.debug("Enqueueing message %r on queue %r.", message.message_id, queue_name)
  self.emit_before("enqueue", message, delay)
  self.do_enqueue(queue_name, message.options["redis_message_id"], message.encode())
  self.emit_after("enqueue", message, delay)
  return message
| 205,947
|
Drop all the messages from a queue.
Parameters:
queue_name(str): The queue to flush.
|
def flush(self, queue_name):
  """Drop all the messages from a queue (and its delay queue).

  Parameters:
    queue_name(str): The queue to flush.
  """
  # Purge both the live queue and its corresponding delay queue.
  self.do_purge(queue_name)
  self.do_purge(dq_name(queue_name))
| 205,948
|
Get a result from the backend.
Parameters:
message(Message)
block(bool): Whether or not to block until a result is set.
timeout(int): The maximum amount of time, in ms, to wait for
a result when block is True. Defaults to 10 seconds.
Raises:
ResultMissing: When block is False and the result isn't set.
ResultTimeout: When waiting for a result times out.
Returns:
object: The result.
|
def get_result(self, message, *, block: bool = False, timeout: int = None) -> Result:
  """Get a result from the backend.

  Parameters:
    message(Message)
    block(bool): Whether or not to block until a result is set.
    timeout(int): The maximum amount of time, in ms, to wait for
      a result when block is True. Defaults to DEFAULT_TIMEOUT.

  Raises:
    ResultMissing: When block is False and the result isn't set.
    ResultTimeout: When waiting for a result times out.

  Returns:
    object: The result.
  """
  if timeout is None:
    timeout = DEFAULT_TIMEOUT
  # Absolute deadline in seconds (timeout is in milliseconds).
  end_time = time.monotonic() + timeout / 1000
  message_key = self.build_message_key(message)
  attempts = 0
  while True:
    result = self._get(message_key)
    if result is Missing and block:
      # Exponential backoff between polls so we don't hammer the backend.
      attempts, delay = compute_backoff(attempts, factor=BACKOFF_FACTOR)
      delay /= 1000
      if time.monotonic() + delay > end_time:
        # The next sleep would overshoot the deadline; give up now.
        raise ResultTimeout(message)
      time.sleep(delay)
      continue
    elif result is Missing:
      raise ResultMissing(message)
    else:
      return result
| 205,961
|
Store a result in the backend.
Parameters:
message(Message)
result(object): Must be serializable.
ttl(int): The maximum amount of time the result may be
stored in the backend for.
|
def store_result(self, message, result: Result, ttl: int) -> None:
  """Store a result in the backend under the message's key.

  Parameters:
    message(Message)
    result(object): Must be serializable.
    ttl(int): The maximum amount of time the result may be stored in the
      backend for.
  """
  return self._store(self.build_message_key(message), result, ttl)
| 205,962
|
Given a message, return its globally-unique key.
Parameters:
message(Message)
Returns:
str
|
def build_message_key(self, message) -> str:
  """Given a message, return its globally-unique key.

  Parameters:
    message(Message)

  Returns:
    str
  """
  parts = {
      "namespace": self.namespace,
      "queue_name": q_name(message.queue_name),
      "actor_name": message.actor_name,
      "message_id": message.message_id,
  }
  raw_key = "%(namespace)s:%(queue_name)s:%(actor_name)s:%(message_id)s" % parts
  # Hash to get a fixed-length key regardless of the component lengths.
  return hashlib.md5(raw_key.encode("utf-8")).hexdigest()
| 205,963
|
Attempt to acquire a slot under this rate limiter.
Parameters:
raise_on_failure(bool): Whether or not failures should raise an
exception. If this is false, the context manager will instead
return a boolean value representing whether or not the rate
limit slot was acquired.
Returns:
bool: Whether or not the slot could be acquired.
|
def acquire(self, *, raise_on_failure=True):
  """Attempt to acquire a slot under this rate limiter.

  Parameters:
    raise_on_failure(bool): Whether or not failures should raise an
      exception. If this is false, the context manager will instead return a
      boolean value representing whether or not the rate limit slot was
      acquired.

  Returns:
    bool: Whether or not the slot could be acquired.
  """
  acquired = False
  try:
    acquired = self._acquire()
    if not acquired and raise_on_failure:
      raise RateLimitExceeded("rate limit exceeded for key %(key)r" % vars(self))
    yield acquired
  finally:
    # Only release slots that were actually acquired.
    if acquired:
      self._release()
| 205,966
|
Compute an exponential backoff value based on some number of attempts.
Parameters:
attempts(int): The number of attempts there have been so far.
factor(int): The number of milliseconds to multiply each backoff by.
max_backoff(int): The max number of milliseconds to backoff by.
max_exponent(int): The maximum backoff exponent.
Returns:
tuple: The new number of attempts and the backoff in milliseconds.
|
def compute_backoff(attempts, *, factor=5, jitter=True, max_backoff=2000, max_exponent=32):
    """Compute an exponential backoff value based on some number of attempts.

    Parameters:
      attempts(int): The number of attempts there have been so far.
      factor(int): The number of milliseconds to multiply each backoff by.
      jitter(bool): Whether to randomize the backoff within [b/2, b].
      max_backoff(int): The max number of milliseconds to backoff by.
      max_exponent(int): The maximum backoff exponent.

    Returns:
      tuple: The new number of attempts and the backoff in milliseconds.
    """
    capped_exponent = attempts if attempts < max_exponent else max_exponent
    backoff = min(factor * 2 ** capped_exponent, max_backoff)
    if jitter:
        # Pick a value uniformly between half the backoff and the full
        # backoff to avoid thundering herds.
        half = backoff / 2
        backoff = int(half + uniform(0, half))
    return attempts + 1, backoff
| 206,004
|
The join() method of standard queues in Python doesn't support
timeouts. This implements the same functionality as that method,
with optional timeout support, by depending on the internals of
Queue.
Raises:
QueueJoinTimeout: When the timeout is reached.
Parameters:
timeout(Optional[float])
|
def join_queue(queue, timeout=None):
    """Wait for all tasks on a standard queue to be marked done.

    The join() method of standard queues in Python doesn't support
    timeouts, so this reimplements it on top of the queue's
    ``all_tasks_done`` condition variable.

    Parameters:
      timeout(Optional[float])

    Raises:
      QueueJoinTimeout: When the timeout is reached.
    """
    with queue.all_tasks_done:
        while queue.unfinished_tasks:
            if not queue.all_tasks_done.wait(timeout=timeout):
                raise QueueJoinTimeout("timed out after %.02f seconds" % timeout)
| 206,005
|
Wait on a list of objects that can be joined with a total
timeout represented by ``timeout``.
Parameters:
joinables(object): Objects with a join method.
timeout(int): The total timeout in milliseconds.
|
def join_all(joinables, timeout):
    """Wait on a list of objects that can be joined with a total
    timeout represented by ``timeout``.

    Parameters:
      joinables(object): Objects with a join method.
      timeout(int): The total timeout in milliseconds.
    """
    started = current_millis()
    for joinable in joinables:
        # BUGFIX: the previous implementation subtracted the *cumulative*
        # elapsed time from the already-reduced remaining timeout on every
        # iteration, so the budget decayed much faster than wall time.
        # Compute the remaining budget from the original total instead.
        remaining = max(0, timeout - (current_millis() - started))
        joinable.join(timeout=remaining / 1000)
| 206,006
|
Declare a new actor on this broker. Declaring an Actor
twice replaces the first actor with the second by name.
Parameters:
actor(Actor): The actor being declared.
|
def declare_actor(self, actor):  # pragma: no cover
    """Declare a new actor on this broker.  Declaring an Actor
    twice replaces the first actor with the second by name.

    Parameters:
      actor(Actor): The actor being declared.
    """
    self.emit_before("declare_actor", actor)
    # Make sure the actor's queue exists before the actor is routable.
    self.declare_queue(actor.queue_name)
    self.actors[actor.actor_name] = actor
    self.emit_after("declare_actor", actor)
| 206,014
|
Alias for the RabbitMQ broker that takes a connection URL as a
positional argument.
Parameters:
url(str): A connection string.
middleware(list[Middleware]): The middleware to add to this
broker.
|
def URLRabbitmqBroker(url, *, middleware=None):
    """Alias for the RabbitMQ broker that takes a connection URL as a
    positional argument.

    Deprecated: use ``RabbitmqBroker(url=...)`` directly instead.

    Parameters:
      url(str): A connection string.
      middleware(list[Middleware]): The middleware to add to this
        broker.
    """
    warnings.warn(
        "Use RabbitmqBroker with the 'url' parameter instead of URLRabbitmqBroker.",
        DeprecationWarning, stacklevel=2,
    )
    return RabbitmqBroker(url=url, middleware=middleware)
| 206,022
|
Create a new consumer for a queue.
Parameters:
queue_name(str): The queue to consume.
prefetch(int): The number of messages to prefetch.
timeout(int): The idle timeout in milliseconds.
Returns:
Consumer: A consumer that retrieves messages from RabbitMQ.
|
def consume(self, queue_name, prefetch=1, timeout=5000):
    """Create a new consumer for a queue.

    Parameters:
      queue_name(str): The queue to consume.
      prefetch(int): The number of messages to prefetch.
      timeout(int): The idle timeout in milliseconds.

    Returns:
      Consumer: A consumer that retrieves messages from RabbitMQ.
    """
    consumer = _RabbitmqConsumer(self.parameters, queue_name, prefetch, timeout)
    return consumer
| 206,029
|
Declare a queue. Has no effect if a queue with the given
name already exists.
Parameters:
queue_name(str): The name of the new queue.
Raises:
ConnectionClosed: If the underlying channel or connection
has been closed.
|
def declare_queue(self, queue_name):
    """Declare a queue.  Has no effect if a queue with the given
    name already exists.

    Also declares the queue's companion delay (dq) and dead-letter
    (xq) queues, retrying the whole declaration on broken
    connections up to MAX_DECLARE_ATTEMPTS times.

    Parameters:
      queue_name(str): The name of the new queue.

    Raises:
      ConnectionClosed: If the underlying channel or connection
        has been closed.
    """
    attempts = 1
    while True:
        try:
            if queue_name not in self.queues:
                self.emit_before("declare_queue", queue_name)
                self._declare_queue(queue_name)
                self.queues.add(queue_name)
                self.emit_after("declare_queue", queue_name)
                # Declare the companion delay queue used for delayed
                # messages...
                delayed_name = dq_name(queue_name)
                self._declare_dq_queue(queue_name)
                self.delay_queues.add(delayed_name)
                self.emit_after("declare_delay_queue", delayed_name)
                # ...and the dead-letter queue for failed messages.
                self._declare_xq_queue(queue_name)
            break
        except (pika.exceptions.AMQPConnectionError,
                pika.exceptions.AMQPChannelError) as e:  # pragma: no cover
            # Delete the channel and the connection so that the next
            # caller may initiate new ones of each.
            del self.channel
            del self.connection
            attempts += 1
            if attempts > MAX_DECLARE_ATTEMPTS:
                raise ConnectionClosed(e) from None
            self.logger.debug(
                "Retrying declare due to closed connection. [%d/%d]",
                attempts, MAX_DECLARE_ATTEMPTS,
            )
| 206,030
|
Enqueue a message.
Parameters:
message(Message): The message to enqueue.
delay(int): The minimum amount of time, in milliseconds, to
delay the message by.
Raises:
ConnectionClosed: If the underlying channel or connection
has been closed.
|
def enqueue(self, message, *, delay=None):
    """Enqueue a message.

    Parameters:
      message(Message): The message to enqueue.
      delay(int): The minimum amount of time, in milliseconds, to
        delay the message by.

    Raises:
      ConnectionClosed: If the underlying channel or connection
        has been closed.
    """
    queue_name = message.queue_name
    # delivery_mode=2 marks the message persistent so it survives
    # broker restarts.
    properties = pika.BasicProperties(
        delivery_mode=2,
        priority=message.options.get("broker_priority"),
    )
    if delay is not None:
        # Delayed messages are routed to the delay queue with an
        # absolute ETA (in milliseconds since the epoch).
        queue_name = dq_name(queue_name)
        message_eta = current_millis() + delay
        message = message.copy(
            queue_name=queue_name,
            options={
                "eta": message_eta,
            },
        )
    attempts = 1
    while True:
        try:
            self.logger.debug("Enqueueing message %r on queue %r.", message.message_id, queue_name)
            self.emit_before("enqueue", message, delay)
            self.channel.basic_publish(
                exchange="",
                routing_key=queue_name,
                body=message.encode(),
                properties=properties,
            )
            self.emit_after("enqueue", message, delay)
            return message
        except (pika.exceptions.AMQPConnectionError,
                pika.exceptions.AMQPChannelError) as e:
            # Delete the channel and the connection so that the
            # next caller/attempt may initiate new ones of each.
            del self.channel
            del self.connection
            attempts += 1
            if attempts > MAX_ENQUEUE_ATTEMPTS:
                raise ConnectionClosed(e) from None
            self.logger.debug(
                "Retrying enqueue due to closed connection. [%d/%d]",
                attempts, MAX_ENQUEUE_ATTEMPTS,
            )
| 206,035
|
Get the number of messages in a queue. This method is only
meant to be used in unit and integration tests.
Parameters:
queue_name(str): The queue whose message counts to get.
Returns:
tuple: A triple representing the number of messages in the
queue, its delayed queue and its dead letter queue.
|
def get_queue_message_counts(self, queue_name):
    """Get the number of messages in a queue.  This method is only
    meant to be used in unit and integration tests.

    Parameters:
      queue_name(str): The queue whose message counts to get.

    Returns:
      tuple: A triple representing the number of messages in the
        queue, its delayed queue and its dead letter queue.
    """
    # Re-declaring is idempotent and returns the current queue stats.
    responses = (
        self._declare_queue(queue_name),
        self._declare_dq_queue(queue_name),
        self._declare_xq_queue(queue_name),
    )
    return tuple(response.method.message_count for response in responses)
| 206,036
|
Drop all the messages from a queue.
Parameters:
queue_name(str): The queue to flush.
|
def flush(self, queue_name):
    """Drop all the messages from a queue.

    Purges the queue itself plus its delay and dead-letter queues.

    Parameters:
      queue_name(str): The queue to flush.
    """
    for target in (queue_name, dq_name(queue_name), xq_name(queue_name)):
        self.channel.queue_purge(target)
| 206,037
|
Create the barrier for the given number of parties.
Parameters:
parties(int): The number of parties to wait for.
Returns:
bool: Whether or not the new barrier was successfully created.
|
def create(self, parties):
    """Create the barrier for the given number of parties.

    Parameters:
      parties(int): The number of parties to wait for.

    Raises:
      ValueError: If parties is not a positive integer.

    Returns:
      bool: Whether or not the new barrier was successfully created.
    """
    if parties <= 0:
        # An assert here would be silently stripped under `python -O`,
        # allowing a zero/negative barrier; raise explicitly instead.
        raise ValueError("parties must be a positive integer.")
    return self.backend.add(self.key, parties, self.ttl)
| 206,046
|
Create a new consumer for a queue.
Parameters:
queue_name(str): The queue to consume.
prefetch(int): The number of messages to prefetch.
timeout(int): The idle timeout in milliseconds.
Raises:
QueueNotFound: If the queue hasn't been declared.
Returns:
Consumer: A consumer that retrieves messages from Redis.
|
def consume(self, queue_name, prefetch=1, timeout=100):
    """Create a new consumer for a queue.

    Parameters:
      queue_name(str): The queue to consume.
      prefetch(int): The number of messages to prefetch.
      timeout(int): The idle timeout in milliseconds.

    Raises:
      QueueNotFound: If the queue hasn't been declared.

    Returns:
      Consumer: A consumer that retrieves messages from Redis.
    """
    try:
        return _StubConsumer(
            self.queues[queue_name],
            self.dead_letters_by_queue[queue_name],
            timeout,
        )
    except KeyError:
        # Suppress the internal KeyError context; the public error is
        # QueueNotFound and the dict lookup is an implementation detail.
        raise QueueNotFound(queue_name) from None
| 206,074
|
Declare a queue. Has no effect if a queue with the given
name has already been declared.
Parameters:
queue_name(str): The name of the new queue.
|
def declare_queue(self, queue_name):
    """Declare a queue.  Has no effect if a queue with the given
    name has already been declared.

    Also declares the queue's companion delay queue.

    Parameters:
      queue_name(str): The name of the new queue.
    """
    if queue_name in self.queues:
        return
    self.emit_before("declare_queue", queue_name)
    self.queues[queue_name] = Queue()
    self.emit_after("declare_queue", queue_name)
    delayed_name = dq_name(queue_name)
    self.queues[delayed_name] = Queue()
    self.delay_queues.add(delayed_name)
    self.emit_after("declare_delay_queue", delayed_name)
| 206,075
|
Enqueue a message.
Parameters:
message(Message): The message to enqueue.
delay(int): The minimum amount of time, in milliseconds, to
delay the message by.
Raises:
QueueNotFound: If the queue the message is being enqueued on
doesn't exist.
|
def enqueue(self, message, *, delay=None):
    """Enqueue a message.

    Parameters:
      message(Message): The message to enqueue.
      delay(int): The minimum amount of time, in milliseconds, to
        delay the message by.

    Raises:
      QueueNotFound: If the queue the message is being enqueued on
        doesn't exist.
    """
    queue_name = message.queue_name
    if delay is not None:
        # Delayed messages go to the delay queue with an absolute ETA.
        queue_name = dq_name(queue_name)
        message = message.copy(
            queue_name=queue_name,
            options={"eta": current_millis() + delay},
        )
    if queue_name not in self.queues:
        raise QueueNotFound(queue_name)
    self.emit_before("enqueue", message, delay)
    self.queues[queue_name].put(message.encode())
    self.emit_after("enqueue", message, delay)
    return message
| 206,076
|
Drop all the messages from a queue.
Parameters:
queue_name(str): The queue to flush.
|
def flush(self, queue_name):
    """Drop all the messages from a queue.

    Parameters:
      queue_name(str): The queue to flush.
    """
    target = self.queues[queue_name]
    # Drain the queue, marking each drained message as done so that
    # join() on the queue does not block.
    for _ in iter_queue(target):
        target.task_done()
| 206,077
|
Run this pipeline.
Parameters:
delay(int): The minimum amount of time, in milliseconds, the
pipeline should be delayed by.
Returns:
pipeline: Itself.
|
def run(self, *, delay=None):
    """Run this pipeline by enqueueing its first message; subsequent
    messages are triggered as each prior step completes.

    Parameters:
      delay(int): The minimum amount of time, in milliseconds, the
        pipeline should be delayed by.

    Returns:
      pipeline: Itself.
    """
    first_message = self.messages[0]
    self.broker.enqueue(first_message, delay=delay)
    return self
| 206,102
|
Get the results of each job in the pipeline.
Parameters:
block(bool): Whether or not to block until a result is set.
timeout(int): The maximum amount of time, in ms, to wait for
a result when block is True. Defaults to 10 seconds.
Raises:
ResultMissing: When block is False and the result isn't set.
ResultTimeout: When waiting for a result times out.
Returns:
A result generator.
|
def get_results(self, *, block=False, timeout=None):
    """Get the results of each job in the pipeline.

    Parameters:
      block(bool): Whether or not to block until a result is set.
      timeout(int): The maximum amount of time, in ms, to wait for
        a result when block is True.  Defaults to 10 seconds.

    Raises:
      ResultMissing: When block is False and the result isn't set.
      ResultTimeout: When waiting for a result times out.

    Returns:
      A result generator.
    """
    # Track a single wall-clock deadline shared across all messages.
    deadline = time.monotonic() + timeout / 1000 if timeout else None
    for message in self.messages:
        if deadline:
            timeout = max(0, int((deadline - time.monotonic()) * 1000))
        yield message.get_result(block=block, timeout=timeout)
| 206,104
|
Run the actors in this group.
Parameters:
delay(int): The minimum amount of time, in milliseconds,
each message in the group should be delayed by.
|
def run(self, *, delay=None):
    """Run the actors in this group.

    Parameters:
      delay(int): The minimum amount of time, in milliseconds,
        each message in the group should be delayed by.
    """
    for child in self.children:
        is_composite = isinstance(child, (group, pipeline))
        if is_composite:
            # Nested groups/pipelines know how to run themselves.
            child.run(delay=delay)
        else:
            self.broker.enqueue(child, delay=delay)
    return self
| 206,107
|
Get the results of each job in the group.
Parameters:
block(bool): Whether or not to block until the results are stored.
timeout(int): The maximum amount of time, in milliseconds,
to wait for results when block is True. Defaults to 10
seconds.
Raises:
ResultMissing: When block is False and the results aren't set.
ResultTimeout: When waiting for results times out.
Returns:
A result generator.
|
def get_results(self, *, block=False, timeout=None):
    """Get the results of each job in the group.

    Parameters:
      block(bool): Whether or not to block until the results are stored.
      timeout(int): The maximum amount of time, in milliseconds,
        to wait for results when block is True.  Defaults to 10
        seconds.

    Raises:
      ResultMissing: When block is False and the results aren't set.
      ResultTimeout: When waiting for results times out.

    Returns:
      A result generator.
    """
    # Track a single wall-clock deadline shared across all children.
    deadline = time.monotonic() + timeout / 1000 if timeout else None
    for child in self.children:
        if deadline:
            timeout = max(0, int((deadline - time.monotonic()) * 1000))
        if isinstance(child, group):
            # Nested groups yield a list of their children's results.
            yield list(child.get_results(block=block, timeout=timeout))
        else:
            yield child.get_result(block=block, timeout=timeout)
| 206,108
|
Block until all the jobs in the group have finished or
until the timeout expires.
Parameters:
timeout(int): The maximum amount of time, in ms, to wait.
Defaults to 10 seconds.
|
def wait(self, *, timeout=None):
    """Block until all the jobs in the group have finished or
    until the timeout expires.

    Parameters:
      timeout(int): The maximum amount of time, in ms, to wait.
        Defaults to 10 seconds.
    """
    results = self.get_results(block=True, timeout=timeout)
    for _ in results:  # pragma: no cover
        pass
| 206,109
|
Build a message. This method is useful if you want to
compose actors. See the actor composition documentation for
details.
Parameters:
*args(tuple): Positional arguments to send to the actor.
**kwargs(dict): Keyword arguments to send to the actor.
Examples:
>>> (add.message(1, 2) | add.message(3))
pipeline([add(1, 2), add(3)])
Returns:
Message: A message that can be enqueued on a broker.
|
def message(self, *args, **kwargs):
    """Build a message without sending it.  This method is useful if
    you want to compose actors.  See the actor composition
    documentation for details.

    Parameters:
      *args(tuple): Positional arguments to send to the actor.
      **kwargs(dict): Keyword arguments to send to the actor.

    Examples:
      >>> (add.message(1, 2) | add.message(3))
      pipeline([add(1, 2), add(3)])

    Returns:
      Message: A message that can be enqueued on a broker.
    """
    built = self.message_with_options(args=args, kwargs=kwargs)
    return built
| 206,112
|
Asynchronously send a message to this actor.
Parameters:
*args(tuple): Positional arguments to send to the actor.
**kwargs(dict): Keyword arguments to send to the actor.
Returns:
Message: The enqueued message.
|
def send(self, *args, **kwargs):
    """Asynchronously send a message to this actor.

    Parameters:
      *args(tuple): Positional arguments to send to the actor.
      **kwargs(dict): Keyword arguments to send to the actor.

    Returns:
      Message: The enqueued message.
    """
    sent = self.send_with_options(args=args, kwargs=kwargs)
    return sent
| 206,114
|
Synchronously call this actor.
Parameters:
*args: Positional arguments to send to the actor.
**kwargs: Keyword arguments to send to the actor.
Returns:
Whatever the underlying function backing this actor returns.
|
def __call__(self, *args, **kwargs):
    """Synchronously call this actor.

    Parameters:
      *args: Positional arguments to send to the actor.
      **kwargs: Keyword arguments to send to the actor.

    Returns:
      Whatever the underlying function backing this actor returns.
    """
    # BUGFIX: `start` was previously assigned inside the `try` block
    # *after* a logging call; if that call raised, the `finally` clause
    # would hit an UnboundLocalError that masked the real error.  Assign
    # it before entering the try block.
    start = time.perf_counter()
    try:
        self.logger.debug("Received args=%r kwargs=%r.", args, kwargs)
        return self.fn(*args, **kwargs)
    finally:
        delta = time.perf_counter() - start
        self.logger.debug("Completed after %.02fms.", delta * 1000)
| 206,116
|
Gracefully stop the Worker and all of its consumers and
workers.
Parameters:
timeout(int): The number of milliseconds to wait for
everything to shut down.
|
def stop(self, timeout=600000):
    """Gracefully stop the Worker and all of its consumers and
    workers.

    Parameters:
      timeout(int): The number of milliseconds to wait for
        everything to shut down.
    """
    self.broker.emit_before("worker_shutdown", self)
    self.logger.info("Shutting down...")
    # Stop workers before consumers. The consumers are kept alive
    # during this process so that heartbeats keep being sent to
    # the broker while workers finish their current tasks.
    self.logger.debug("Stopping workers...")
    for thread in self.workers:
        thread.stop()
    join_all(self.workers, timeout)
    self.logger.debug("Workers stopped.")
    self.logger.debug("Stopping consumers...")
    for thread in self.consumers.values():
        thread.stop()
    join_all(self.consumers.values(), timeout)
    self.logger.debug("Consumers stopped.")
    # Any messages still sitting in the in-memory work queue were
    # pulled off a broker queue but never processed; push them back so
    # they aren't lost.
    self.logger.debug("Requeueing in-memory messages...")
    messages_by_queue = defaultdict(list)
    for _, message in iter_queue(self.work_queue):
        messages_by_queue[message.queue_name].append(message)
    for queue_name, messages in messages_by_queue.items():
        try:
            self.consumers[queue_name].requeue_messages(messages)
        except ConnectionError:
            # Best effort: losing the connection here means the broker
            # will redeliver the unacked messages anyway.
            self.logger.warning("Failed to requeue messages on queue %r.", queue_name, exc_info=True)
    self.logger.debug("Done requeueing in-progress messages.")
    self.logger.debug("Closing consumers...")
    for consumer in self.consumers.values():
        consumer.close()
    self.logger.debug("Consumers closed.")
    self.broker.emit_after("worker_shutdown", self)
    self.logger.info("Worker has been shut down.")
| 206,123
|
Process a message pulled off of the work queue then push it
back to its associated consumer for post processing.
Parameters:
message(MessageProxy)
|
def process_message(self, message):
    """Process a message pulled off of the work queue then push it
    back to its associated consumer for post processing.

    Parameters:
      message(MessageProxy)
    """
    try:
        self.logger.debug("Received message %s with id %r.", message, message.message_id)
        self.broker.emit_before("process_message", message)
        res = None
        # Messages already marked failed (e.g. by middleware) skip the
        # actor call but still flow through post-processing.
        if not message.failed:
            actor = self.broker.get_actor(message.actor_name)
            res = actor(*message.args, **message.kwargs)
        self.broker.emit_after("process_message", message, result=res)
    except SkipMessage:
        self.logger.warning("Message %s was skipped.", message)
        self.broker.emit_after("skip_message", message)
    except BaseException as e:
        # Stuff the exception into the message [proxy] so that it
        # may be used by the stub broker to provide a nicer
        # testing experience.
        message.stuff_exception(e)
        if isinstance(e, RateLimitExceeded):
            self.logger.warning("Rate limit exceeded in message %s: %s.", message, e)
        else:
            self.logger.warning("Failed to process message %s with unhandled exception.", message, exc_info=True)
        self.broker.emit_after("process_message", message, exception=e)
    finally:
        # NOTE: There is no race here as any message that was
        # processed must have come off of a consumer. Therefore,
        # there has to be a consumer for that message's queue so
        # this is safe. Probably.
        self.consumers[message.queue_name].post_process_message(message)
        self.work_queue.task_done()
| 206,138
|
Create a scanner for running scanning commands synchronously.
Args:
network_retries: How many times SSLyze should retry a connection that timed out.
network_timeout: The time until an ongoing connection times out.
|
def __init__(
    self,
    network_retries: int = DEFAULT_NETWORK_RETRIES,
    network_timeout: int = DEFAULT_NETWORK_TIMEOUT
) -> None:
    """Create a scanner for running scanning commands synchronously.

    Args:
        network_retries: How many times SSLyze should retry a connection
            that timed out.
        network_timeout: The time until an ongoing connection times out.
    """
    self._plugins_repository = PluginsRepository()
    # Set global network settings; these affect every SslConnection
    # created after this point, not just this scanner instance.
    SslConnection.set_global_network_settings(network_retries, network_timeout)
| 206,607
|
Queue a scan command targeting a specific server.
Args:
server_info: The server's connectivity information. The test_connectivity_to_server() method must have been
called first to ensure that the server is online and accessible.
scan_command: The scan command to run against this server.
|
def queue_scan_command(self, server_info: ServerConnectivityInfo, scan_command: PluginScanCommand) -> None:
    """Queue a scan command targeting a specific server.

    Args:
        server_info: The server's connectivity information.  The
            test_connectivity_to_server() method must have been called
            first to ensure that the server is online and accessible.
        scan_command: The scan command to run against this server.
    """
    # Ensure we have the right processes and queues in place for this hostname
    self._check_and_create_process(server_info.hostname)
    self._queued_tasks_nb += 1
    task = (server_info, scan_command)
    if scan_command.is_aggressive:
        # Aggressive commands should not be run in parallel against a
        # given server, so route them through the per-hostname queue,
        # which serializes them.
        self._hostname_queues_dict[server_info.hostname].put(task)
    else:
        # Normal commands get put in the standard/shared queue
        self._task_queue.put(task)
| 206,643
|
The WSGI Application Server.
Arguments:
environ {dict} -- The WSGI environ dictionary
start_response {WSGI callable}
Returns:
WSGI Response
|
def app(environ, start_response):
    """The WSGI Application Server.

    Arguments:
        environ {dict} -- The WSGI environ dictionary
        start_response {WSGI callable}

    Returns:
        WSGI Response
    """
    from wsgi import container
    container.bind('Environ', environ)
    try:
        # Boot every registered WSGI provider; providers are expected
        # to populate the Request/Response objects on the container.
        for provider in container.make('WSGIProviders'):
            container.resolve(provider.boot)
    except Exception as e:
        # Delegate to the application's exception handler, which sets
        # an error response on the container instead of crashing.
        container.make('ExceptionHandler').load_exception(e)
    start_response(container.make('Request').get_status_code(),
                   container.make('Request').get_and_reset_headers())
    return iter([bytes(container.make('Response'), 'utf-8')])
| 206,990
|
Show the welcome page.
Arguments:
view {masonite.view.View} -- The Masonite view class.
Application {config.application} -- The application config module.
Returns:
masonite.view.View -- The Masonite view class.
|
def show(self, view: View, request: Request):
    """Show the welcome page.

    Arguments:
        view {masonite.view.View} -- The Masonite view class.
        request {masonite.request.Request} -- The Masonite request class.

    Returns:
        masonite.view.View -- The rendered welcome view.
    """
    context = {'app': request.app().make('Application')}
    return view.render('welcome', context)
| 206,993
|
Compile the PyMC3 model from an abstract model specification.
Args:
spec (Model): A bambi Model instance containing the abstract
specification of the model to compile.
reset (bool): if True (default), resets the PyMC3BackEnd instance
before compiling.
|
def build(self, spec, reset=True):
    """Compile the PyMC3 model from an abstract model specification.

    Args:
        spec (Model): A bambi Model instance containing the abstract
            specification of the model to compile.
        reset (bool): if True (default), resets the PyMC3BackEnd instance
            before compiling.
    """
    if reset:
        self.reset()
    with self.model:
        # Accumulate the linear predictor term by term.
        self.mu = 0.
        for t in spec.terms.values():
            data = t.data
            label = t.name
            dist_name = t.prior.name
            dist_args = t.prior.args
            n_cols = t.data.shape[1]
            coef = self._build_dist(spec, label, dist_name,
                                    shape=n_cols, **dist_args)
            if t.random:
                # Random effects: index coefficients by group membership.
                self.mu += coef[t.group_index][:, None] * t.predictor
            else:
                self.mu += pm.math.dot(data, coef)[:, None]
        y = spec.y.data
        y_prior = spec.family.prior
        link_f = spec.family.link
        if isinstance(link_f, string_types):
            # Look up named link functions; callables are used as-is.
            link_f = self.links[link_f]
        else:
            link_f = link_f
        y_prior.args[spec.family.parent] = link_f(self.mu)
        y_prior.args['observed'] = y
        # Attach the likelihood for the observed outcome.
        y_like = self._build_dist(spec, spec.y.name, y_prior.name,
                                  **y_prior.args)
        self.spec = spec
| 207,213
|
Compute autocorrelation using FFT for every lag for the input array
https://en.wikipedia.org/wiki/Autocorrelation#Efficient_computation.
Args:
x (array-like): An array containing MCMC samples.
Returns:
np.ndarray: An array of the same size as the input array.
|
def autocorr(x):
    """Compute autocorrelation using FFT for every lag for the input array.

    https://en.wikipedia.org/wiki/Autocorrelation#Efficient_computation

    Args:
        x (array-like): An array containing MCMC samples.

    Returns:
        np.ndarray: An array of the same size as the input array.
    """
    centered = x - x.mean()
    # Full correlation via FFT; keep only the non-negative lags.
    full_corr = fftconvolve(centered, centered[::-1])
    nonneg = full_corr[full_corr.size // 2:]
    # Divide by the number of overlapping terms at each lag, then
    # normalize so lag 0 equals 1.
    nonneg = nonneg / np.arange(len(centered), 0, -1)
    return nonneg / nonneg[0]
| 207,234
|
Compute autocovariance estimates for every lag for the input array.
Args:
x (array-like): An array containing MCMC samples.
Returns:
np.ndarray: An array of the same size as the input array.
|
def autocov(x):
    """Compute autocovariance estimates for every lag for the input array.

    Args:
        x (array-like): An array containing MCMC samples.

    Returns:
        np.ndarray: An array of the same size as the input array.
    """
    n = len(x)
    # Rescale the unbiased (ddof=1) variance to the biased estimator.
    biased_var = np.var(x, ddof=1) * (n - 1) / n
    return autocorr(x) * biased_var
| 207,235
|
Set up the model for sampling/fitting.
Performs any steps that require access to all model terms (e.g., scaling priors
on each term), then calls the BackEnd's build() method.
Args:
backend (str): The name of the backend to use for model fitting.
Currently, 'pymc' and 'stan' are supported. If None, assume
that fit() has already been called (possibly without building),
and look in self._backend_name.
|
def build(self, backend=None):
    """Set up the model for sampling/fitting.

    Performs any steps that require access to all model terms (e.g.,
    scaling priors on each term), then calls the BackEnd's build()
    method.

    Args:
        backend (str): The name of the backend to use for model fitting.
            Currently, 'pymc' and 'stan' are supported.  If None, assume
            that fit() has already been called (possibly without
            building), and look in self._backend_name.
    """
    # retain only the complete cases
    n_total = len(self.data.index)
    if len(self.completes):
        completes = [set(x) for x in sum(self.completes, [])]
        completes = set.intersection(*completes)
    else:
        completes = [x for x in range(len(self.data.index))]
    self.clean_data = self.data.iloc[list(completes), :]
    # warn the user about any dropped rows
    if len(completes) < n_total:
        msg = "Automatically removing {}/{} rows from the dataset."
        msg = msg.format(n_total - len(completes), n_total)
        warnings.warn(msg)
    # loop over the added terms and actually _add() them
    for term_args in self.added_terms:
        self._add(**term_args)
    # set custom priors
    self._set_priors(**self._added_priors)
    # prepare all priors
    for name, term in self.terms.items():
        type_ = 'intercept' if name == 'Intercept' else \
            'random' if self.terms[name].random else 'fixed'
        term.prior = self._prepare_prior(term.prior, type_)
    # check for backend
    if backend is None:
        if self._backend_name is None:
            raise ValueError("Error: no backend was passed or set in the "
                             "Model; did you forget to call fit()?")
        backend = self._backend_name
    # check for outcome
    if self.y is None:
        raise ValueError("No outcome (y) variable is set! Please specify "
                         "an outcome variable using the formula interface "
                         "before build() or fit().")
    # X = fixed effects design matrix (excluding intercept/constant term)
    # r2_x = 1 - 1/VIF, i.e., R2 for predicting each x from all other x's.
    # only compute these stats if there are multiple terms in the model
    terms = [t for t in self.fixed_terms.values() if t.name != 'Intercept']
    if len(self.fixed_terms) > 1:
        X = [pd.DataFrame(x.data, columns=x.levels) for x in terms]
        X = pd.concat(X, axis=1)
        self.dm_statistics = {
            'r2_x': pd.Series({
                x: sm.OLS(endog=X[x],
                          exog=sm.add_constant(X.drop(x, axis=1))
                          if 'Intercept' in self.term_names
                          else X.drop(x, axis=1)).fit().rsquared
                for x in list(X.columns)}),
            'sd_x': X.std(),
            'mean_x': X.mean(axis=0)
        }
        # save potentially useful info for diagnostics, send to
        # ModelResults.
        # mat = correlation matrix of X, w/ diagonal replaced by X means
        mat = X.corr()
        for x in list(mat.columns):
            mat.loc[x, x] = self.dm_statistics['mean_x'][x]
        self._diagnostics = {
            # the Variance Inflation Factors (VIF), which is possibly
            # useful for diagnostics
            'VIF': 1/(1 - self.dm_statistics['r2_x']),
            'corr_mean_X': mat
        }
        # throw informative error if perfect collinearity among fixed fx
        if any(self.dm_statistics['r2_x'] > .999):
            raise ValueError(
                "There is perfect collinearity among the fixed effects!\n"
                "Printing some design matrix statistics:\n" +
                str(self.dm_statistics) + '\n' +
                str(self._diagnostics))
    # throw informative error message if any categorical predictors have 1
    # category
    num_cats = [x.data.size for x in self.fixed_terms.values()]
    if any(np.array(num_cats) == 0):
        raise ValueError(
            "At least one categorical predictor contains only 1 category!")
    # only set priors if there is at least one term in the model
    if len(self.terms) > 0:
        # Get and scale default priors if none are defined yet
        if self.taylor is not None:
            taylor = self.taylor
        else:
            taylor = 5 if self.family.name == 'gaussian' else 1
        scaler = PriorScaler(self, taylor=taylor)
        scaler.scale()
    # For bernoulli models with n_trials = 1 (most common use case),
    # tell user which event is being modeled
    if self.family.name == 'bernoulli' and np.max(self.y.data) < 1.01:
        event = next(
            i for i, x in enumerate(self.y.data.flatten()) if x > .99)
        warnings.warn('Modeling the probability that {}==\'{}\''.format(
            self.y.name, str(self.clean_data[self.y.name].iloc[event])))
    self._set_backend(backend)
    self.backend.build(self)
    self.built = True
| 207,242
|
Update the model arguments with additional arguments.
Args:
kwargs (dict): Optional keyword arguments to add to prior args.
|
def update(self, **kwargs):
    """Update the model arguments with additional arguments.

    Args:
        kwargs (dict): Optional keyword arguments to add to prior args.
    """
    # Backends expect numpy arrays, so coerce scalar numerics before
    # merging; all other values are stored as-is.
    coerced = {
        key: np.array(value) if isinstance(value, (int, float)) else value
        for key, value in kwargs.items()
    }
    self.args.update(coerced)
| 207,259
|
Compile the Stan model from an abstract model specification.
Args:
spec (Model): A bambi Model instance containing the abstract
specification of the model to compile.
reset (bool): if True (default), resets the StanBackEnd instance
before compiling.
|
def build(self, spec, reset=True):
    """Compile the Stan model from an abstract model specification.

    Args:
        spec (Model): A bambi Model instance containing the abstract
            specification of the model to compile.
        reset (bool): if True (default), resets the StanBackEnd instance
            before compiling.
    """
    if reset:
        self.reset()
    n_cases = len(spec.y.data)
    self.data.append('int<lower=1> N;')
    self.X['N'] = n_cases
    def _sanitize_name(name):
        # Stan identifiers may only contain word characters; map any
        # other character to '_' and remember the original name so
        # results can be translated back.
        if name in self._original_names:
            return name
        clean = 'b_' + re.sub('[^a-zA-Z0-9\_]+', '_', name)
        self._original_names[clean] = name
        return clean
    def _map_dist(dist, **kwargs):
        # Translate a bambi prior into a Stan sampling expression plus
        # the bounds declaration that goes with it.
        if dist not in self.dists:
            raise ValueError("There is no distribution named '%s' "
                             "in Stan." % dist)
        stan_dist = self.dists[dist]
        dist_name = stan_dist['name']
        dist_args = stan_dist['args']
        dist_bounds = stan_dist.get('bounds', '')
        # Flat/HalfFlat/undefined priors are handled separately
        if dist_name is None:
            return None, dist_bounds
        lookup_args = [a[1:] for a in dist_args if a.startswith('#')]
        missing = set(lookup_args) - set(list(kwargs.keys()))
        if missing:
            raise ValueError("The following mandatory parameters of "
                             "the %s distribution are missing: %s."
                             % (dist, missing))
        # Named arguments to take from the Prior object are denoted with
        # a '#'; otherwise we take the value in the self.dists dict as-is.
        dp = [kwargs[p[1:]] if p.startswith('#') else p for p in dist_args]
        # Sometimes we get numpy arrays at this stage, so convert to float
        dp = [float(p.ravel()[0]) if isinstance(p, np.ndarray) else p
              for p in dp]
        dist_term = '%s(%s)' % (dist_name, ', '.join([str(p) for p in dp]))
        # handle Uniform variables, for which the bounds are the parameters
        if dist_name=='uniform':
            dist_bounds = dist_bounds.format(*dp)
        return dist_term, dist_bounds
    def _add_data(name, data, term):
        # NOTE: closes over `n_cols` and `t` from the term loop below.
        if data.shape[1] == 1:
            # For random effects, index into grouping variable
            if n_cols > 1:
                index_name = _sanitize_name('%s_grp_ind' % name)
                self.data.append('int %s[N];' % index_name)
                self.X[index_name] = t.group_index + 1  # 1-based indexing
            predictor = 'vector[N] %s;'
        else:
            predictor = ('matrix[N, %d]' % (n_cols)) + ' %s;'
        data_name = _sanitize_name('%s_data' % name)
        var_name = _sanitize_name(name)
        self.data.append(predictor % data_name)
        self.X[data_name] = data.squeeze()
        if data.shape[1] == 1 and n_cols > 1:
            # Random-effect contribution must be built element-wise.
            code = '%s[%s[n]] * %s[n]' % (var_name, index_name, data_name)
            self.mu_cat.append(code)
        else:
            self.mu_cont.append('%s * %s' % (data_name, var_name))
    def _add_parameters(name, dist_name, n_cols, **dist_args):
        def _expand_args(k, v, name):
            # Hyperpriors (Prior-valued args) become parameters too.
            if isinstance(v, Prior):
                name = _sanitize_name('%s_%s' % (name, k))
                return _add_parameters(name, v.name, 1, **v.args)
            return v
        kwargs = {k: _expand_args(k, v, name)
                  for (k, v) in dist_args.items()}
        _dist, _bounds = _map_dist(dist_name, **kwargs)
        if n_cols == 1:
            stan_par = 'real'
        else:
            stan_par = 'vector[%d]' % n_cols
        var_name = _sanitize_name(name)
        # non-centered parameterization
        if spec.noncentered and 'sd' in kwargs and \
                isinstance(kwargs['sd'], string_types):
            offset_name = _sanitize_name(name + '_offset')
            offset = 'vector[%d] %s;' % (n_cols, offset_name)
            self.parameters.append(offset)
            self.model.append('%s ~ normal(0, 1);' % offset_name)
            self.transformed_parameters.append('%s%s %s;' % (stan_par,
                                                             _bounds,
                                                             var_name))
            trans = '%s = multiply(%s, %s);' % (var_name, offset_name,
                                                kwargs['sd'])
            self.expressions.append(trans)
        else:
            self.parameters.append('%s%s %s;' % (stan_par, _bounds,
                                                 var_name))
            if _dist is not None:
                self.model.append('%s ~ %s;' % (var_name, _dist))
        return name
    for t in spec.terms.values():
        data = t.data
        label = t.name
        dist_name = t.prior.name
        dist_args = t.prior.args
        n_cols = data.shape[1]
        if t.random:
            data = t.predictor
        # Add to Stan model
        _add_data(label, data, t)
        _add_parameters(label, dist_name, n_cols, **dist_args)
    # yhat
    self.transformed_parameters.append('vector[N] yhat;')
    if self.mu_cont:
        yhat_cont = 'yhat = %s;' % ' + '.join(self.mu_cont)
        self.expressions.append(yhat_cont)
    else:
        self.mu_cat.insert(0, '0')
    if self.mu_cat:
        loops = ('for (n in 1:N)\n\t\tyhat[n] = yhat[n] + %s'
                 % ' + '.join(self.mu_cat) + ';\n\t')
        self.expressions.append(loops)
    # Add expressions that go in transformed parameter block (they have
    # to come after variable definitions)
    self.transformed_parameters += self.expressions
    # add response variable (y)
    _response_format = self.families[spec.family.name]['format']
    self.data.append('{} y{};'.format(*_response_format))
    # add response distribution parameters other than the location
    # parameter
    for k, v in spec.family.prior.args.items():
        if k != spec.family.parent and isinstance(v, Prior):
            _bounds = _map_dist(v.name, **v.args)[1]
            _param = 'real{} {}_{};'.format(_bounds, spec.y.name, k)
            self.parameters.append(_param)
    # specify the response distribution
    _response_dist = self.families[spec.family.name]['name']
    _response_args = '{}(yhat)'.format(self.links[spec.family.link])
    _response_args = {spec.family.parent: _response_args}
    for k, v in spec.family.prior.args.items():
        if k != spec.family.parent:
            _response_args[k] = '{}_{}'.format(spec.y.name, k) \
                if isinstance(v, Prior) else str(v)
    _dist = _map_dist(_response_dist, **_response_args)[0]
    self.model.append('y ~ {};'.format(_dist))
    # add the data
    _response_type = self.families[spec.family.name]['type']
    self.X['y'] = spec.y.data.astype(_response_type).squeeze()
    # Construct the stan script
    def format_block(name):
        key = name.replace(' ', '_')
        els = ''.join(['\t%s\n' % e for e in getattr(self, key)])
        return '%s {\n%s}\n' % (name, els)
    blocks = ['data', 'transformed data', 'parameters',
              'transformed parameters', 'model']
    self.model_code = ''.join([format_block(bl) for bl in blocks])
    self.spec = spec
    self.stan_model = ps.StanModel(model_code=self.model_code)
| 207,271
|
Run the Stan sampler.
Args:
samples (int): Number of samples to obtain (in each chain).
chains (int): Number of chains to use.
kwargs (dict): Optional keyword arguments passed onto the PyStan
StanModel.sampling() call.
Returns: A PyMC3ModelResults instance.
|
def run(self, samples=1000, chains=1, **kwargs):
    """Run the Stan sampler.

    Args:
        samples (int): Number of samples to obtain (in each chain).
        chains (int): Number of chains to use.
        kwargs (dict): Optional keyword arguments passed onto the PyStan
            StanModel.sampling() call.

    Returns: A ModelResults instance built from the fit.
    """
    fit = self.stan_model.sampling(data=self.X, iter=samples,
                                   chains=chains, **kwargs)
    self.fit = fit
    return self._convert_to_results()
| 207,272
|
Run a parametrized generator
Args:
cache_root (str): The directory where to store the generated cores
Returns:
list: Cores created by the generator
|
def generate(self, cache_root):
    """Run a parametrized generator.

    Args:
        cache_root (str): The directory where to store the generated cores

    Returns:
        list: Cores created by the generator
    """
    workdir = os.path.join(cache_root, 'generated', self.vlnv.sanitized_name)
    input_path = os.path.join(workdir, self.name+'_input.yml')
    logger.info('Generating ' + str(self.vlnv))
    if not os.path.exists(workdir):
        os.makedirs(workdir)
    # Serialize the generator's input parameters for it to consume.
    with open(input_path, 'w') as handle:
        handle.write(yaml.dump(self.generator_input))
    cmd = [os.path.join(os.path.abspath(self.generator.root), self.generator.command),
           input_path]
    if self.generator.interpreter:
        # Prepend the interpreter (e.g. python) when one is configured.
        cmd[0:0] = [self.generator.interpreter]
    Launcher(cmd[0], cmd[1:],
             cwd=workdir).run()
    found_cores = []
    logger.debug("Looking for generated cores in " + workdir)
    for root, _dirs, files in os.walk(workdir):
        for fname in files:
            if fname.endswith('.core'):
                try:
                    found_cores.append(Core(os.path.join(root, fname)))
                except SyntaxError as e:
                    w = "Failed to parse generated core file " + fname + ": " + e.msg
                    raise RuntimeError(w)
    logger.debug("Found " + ', '.join(str(c.name) for c in found_cores))
    return found_cores
| 209,821
|
Creates a new object instance and adds the private finalizer
attributes to it.
Returns: new object instance
Arguments:
* *args, **kwargs -- ignored
|
def __new__(cls, *args, **kwargs):
    """Create a new object instance and add the private finalizer
    attributes to it.

    Returns: new object instance

    Arguments:
    * *args, **kwargs -- ignored
    """
    obj = super(_AutoFinalizedObjectBase, cls).__new__(cls)
    # Mark the instance so the finalizer only ever runs once.
    obj._finalize_called = False
    return obj
| 210,023
|
Loads a library. Catches and logs exceptions.
Returns: the loaded library or None
Arguments:
* lib -- path to/name of the library to be loaded
* name -- the library's identifier (for logging)
Defaults to None.
* lib_cls -- library class. Defaults to None (-> ctypes.CDLL).
|
def load_library(lib, name=None, lib_cls=None):
    """Load a library, catching and logging exceptions.

    Returns: the loaded library or None

    Arguments:
    * lib -- path to/name of the library to be loaded
    * name -- the library's identifier (for logging).
      Defaults to None.
    * lib_cls -- library class. Defaults to None (-> ctypes.CDLL).
    """
    loader = lib_cls if lib_cls else ctypes.CDLL
    try:
        return loader(lib)
    except Exception:
        lib_msg = '%s (%s)' % (name, lib) if name else lib
        lib_msg += ' could not be loaded'
        if sys.platform == 'cygwin':
            lib_msg += ' in cygwin'
        _LOGGER.error(lib_msg, exc_info=True)
        return None
| 210,099
|
r"""Perform a bulk write request to the endpoint specified.
Arguments:
endpoint: endpoint number.
buffer: sequence data buffer to write.
This parameter can be any sequence type.
timeout: operation timeout in milliseconds. (default: 100)
Returns the number of bytes written.
|
def bulkWrite(self, endpoint, buffer, timeout = 100):
    r"""Perform a bulk write request to the endpoint specified.

    Arguments:
        endpoint: endpoint number.
        buffer: sequence data buffer to write.
                This parameter can be any sequence type.
        timeout: operation timeout in milliseconds. (default: 100)

    Returns the number of bytes written.
    """
    # Delegate to the wrapped pyusb device object.
    return self.dev.write(endpoint, buffer, timeout)
| 210,135
|
r"""Performs a bulk read request to the endpoint specified.
Arguments:
endpoint: endpoint number.
size: number of bytes to read.
timeout: operation timeout in milliseconds. (default: 100)
Returns a tuple with the data read.
|
def bulkRead(self, endpoint, size, timeout = 100):
    r"""Performs a bulk read request to the endpoint specified.

    Arguments:
        endpoint: endpoint number.
        size: number of bytes to read.
        timeout: operation timeout in milliseconds. (default: 100)

    Returns a tuple with the data read.
    """
    # Delegate to the wrapped pyusb device object.
    return self.dev.read(endpoint, size, timeout)
| 210,136
|
r"""Perform a interrupt write request to the endpoint specified.
Arguments:
endpoint: endpoint number.
buffer: sequence data buffer to write.
This parameter can be any sequence type.
timeout: operation timeout in milliseconds. (default: 100)
Returns the number of bytes written.
|
def interruptWrite(self, endpoint, buffer, timeout = 100):
    r"""Perform an interrupt write request to the endpoint specified.

    Arguments:
        endpoint: endpoint number.
        buffer: sequence data buffer to write.
                This parameter can be any sequence type.
        timeout: operation timeout in milliseconds. (default: 100)

    Returns the number of bytes written.
    """
    # Delegate to the wrapped pyusb device object.
    return self.dev.write(endpoint, buffer, timeout)
| 210,137
|
r"""Performs a interrupt read request to the endpoint specified.
Arguments:
endpoint: endpoint number.
size: number of bytes to read.
timeout: operation timeout in milliseconds. (default: 100)
Returns a tuple with the data read.
|
def interruptRead(self, endpoint, size, timeout = 100):
    r"""Performs an interrupt read request to the endpoint specified.

    Arguments:
        endpoint: endpoint number.
        size: number of bytes to read.
        timeout: operation timeout in milliseconds. (default: 100)

    Returns a tuple with the data read.
    """
    # Delegate to the wrapped pyusb device object.
    return self.dev.read(endpoint, size, timeout)
| 210,138
|
r"""Claims the interface with the Operating System.
Arguments:
interface: interface number or an Interface object.
|
def claimInterface(self, interface):
    r"""Claims the interface with the Operating System.

    Arguments:
        interface: interface number or an Interface object.
    """
    # Accept either an Interface object or a bare interface number.
    if isinstance(interface, Interface):
        interface = interface.interfaceNumber
    util.claim_interface(self.dev, interface)
    # Remember the claimed interface for later alt-setting calls.
    self.__claimed_interface = interface
| 210,140
|
r"""Set the active configuration of a device.
Arguments:
configuration: a configuration value or a Configuration object.
|
def setConfiguration(self, configuration):
    r"""Set the active configuration of a device.

    Arguments:
        configuration: a configuration value or a Configuration object.
    """
    # Accept either a Configuration object or a bare configuration value.
    if isinstance(configuration, Configuration):
        configuration = configuration.value
    self.dev.set_configuration(configuration)
| 210,142
|
r"""Sets the active alternate setting of the current interface.
Arguments:
alternate: an alternate setting number or an Interface object.
|
def setAltInterface(self, alternate):
    r"""Sets the active alternate setting of the current interface.

    Arguments:
        alternate: an alternate setting number or an Interface object.
    """
    # Accept either an Interface object or a bare alternate-setting number.
    if isinstance(alternate, Interface):
        alternate = alternate.alternateSetting
    # Applies to the interface previously stored by claimInterface.
    self.dev.set_interface_altsetting(self.__claimed_interface, alternate)
| 210,143
|
r"""Retrieve the string descriptor specified by index
and langid from a device.
Arguments:
index: index of descriptor in the device.
length: number of bytes of the string (ignored)
langid: Language ID. If it is omitted, the first
language will be used.
|
def getString(self, index, length, langid = None):
    r"""Retrieve the string descriptor specified by index
    and langid from a device.

    Arguments:
        index: index of descriptor in the device.
        length: number of bytes of the string (ignored)
        langid: Language ID. If it is omitted, the first
                language will be used.
    """
    # 'length' is kept for interface compatibility but ignored here.
    return util.get_string(self.dev, index, langid).encode('ascii')
| 210,144
|
r"""Retrieves a descriptor from the device identified by the type
and index of the descriptor.
Arguments:
desc_type: descriptor type.
desc_index: index of the descriptor.
len: descriptor length.
endpoint: ignored.
|
def getDescriptor(self, desc_type, desc_index, length, endpoint = -1):
    r"""Retrieves a descriptor from the device identified by the type
    and index of the descriptor.

    Arguments:
        desc_type: descriptor type.
        desc_index: index of the descriptor.
        length: descriptor length.
        endpoint: ignored.
    """
    # 'endpoint' is kept for interface compatibility but ignored here.
    return control.get_descriptor(self.dev, length, desc_type, desc_index)
| 210,145
|
r"""Detach a kernel driver from the interface (if one is attached,
we have permission and the operation is supported by the OS)
Arguments:
interface: interface number or an Interface object.
|
def detachKernelDriver(self, interface):
    r"""Detach a kernel driver from the interface (if one is attached,
    we have permission and the operation is supported by the OS)

    Arguments:
        interface: interface number or an Interface object.
    """
    # Accept either an Interface object or a bare interface number.
    if isinstance(interface, Interface):
        interface = interface.interfaceNumber
    self.dev.detach_kernel_driver(interface)
| 210,146
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.