docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Create a new ADB stream. Args: destination: String identifier for the destination of this stream. transport: AdbStreamTransport to use for reads/writes.
def __init__(self, destination, transport): self._destination = destination self._transport = transport
205,198
Write data to this stream. Args: data: Data to write. timeout_ms: Timeout to use for the write/Ack transaction, in milliseconds (or as a PolledTimeout object). Raises: AdbProtocolError: If an ACK is not received. AdbStreamClosedError: If the stream is already closed, or gets cl...
def write(self, data, timeout_ms=None): timeout = timeouts.PolledTimeout.from_millis(timeout_ms) # Break the data up into our transport's maxdata sized WRTE messages. while data: self._transport.write( data[:self._transport.adb_connection.maxdata], timeout) data = data[self._trans...
205,200
Yield data until this stream is closed. Args: timeout_ms: Timeout in milliseconds to keep reading (or a PolledTimeout object). Yields: Data read from a single call to self.read(), until the stream is closed or timeout is reached. Raises: AdbTimeoutError: On timeout.
def read_until_close(self, timeout_ms=None): while True: try: yield self.read(timeout_ms=timeout_ms) except usb_exceptions.AdbStreamClosedError: break
205,202
Add the given message to this transport's queue. This method also handles ACKing any WRTE messages. Args: message: The AdbMessage to enqueue. timeout: The timeout to use for the operation. Specifically, WRTE messages cause an OKAY to be sent; timeout is used for that send.
def enqueue_message(self, message, timeout): # Ack WRTE messages immediately, handle our OPEN ack if it gets enqueued. if message.command == 'WRTE': self._send_command('OKAY', timeout=timeout) elif message.command == 'OKAY': self._set_or_check_remote_id(message.arg0) self.message_queue....
205,209
Read 'length' bytes from this stream transport. Args: length: If not 0, read this many bytes from the stream, otherwise read all available data (at least one byte). timeout: timeouts.PolledTimeout to use for this read operation. Returns: The bytes read from this stream.
def read(self, length, timeout): self._read_messages_until_true( lambda: self._buffer_size and self._buffer_size >= length, timeout) with self._read_buffer_lock: data, push_back = ''.join(self._read_buffer), '' if length: data, push_back = data[:length], data[length:] sel...
205,211
Create an ADB connection to a device. Args: transport: AdbTransportAdapter to use for reading/writing AdbMessages maxdata: Max data size the remote endpoint will accept. remote_banner: Banner received from the remote endpoint.
def __init__(self, transport, maxdata, remote_banner): try: self.systemtype, self.serial, self.banner = remote_banner.split(':', 2) except ValueError: raise usb_exceptions.AdbProtocolError('Received malformed banner %s', remote_banner) self.transp...
205,213
Instantiate required plugs. Instantiates plug types and saves the instances in self._plugs_by_type for use in provide_plugs(). Args: plug_types: Plug types may be specified here rather than passed into the constructor (this is used primarily for unit testing phase...
def initialize_plugs(self, plug_types=None): types = plug_types if plug_types is not None else self._plug_types for plug_type in types: # Create a logger for this plug. All plug loggers go under the 'plug' # sub-logger in the logger hierarchy. plug_logger = self.logger.getChild(plug_type....
205,229
Wait for a change in the state of a frontend-aware plug. Args: plug_name: Plug name, e.g. 'openhtf.plugs.user_input.UserInput'. remote_state: The last observed state. timeout_s: Number of seconds to wait for an update. Returns: An updated state, or None if the timeout runs out. Ra...
def wait_for_plug_update(self, plug_name, remote_state, timeout_s): plug = self._plugs_by_name.get(plug_name) if plug is None: raise InvalidPlugError('Cannot wait on unknown plug "%s".' % plug_name) if not isinstance(plug, FrontendAwareBasePlug): raise InvalidPlugError('Cannot wait on a p...
205,233
Wait for any in a list of threading.Event's to be set. Args: events: List of threading.Event's. timeout_s: Max duration in seconds to wait before returning. Returns: True if at least one event was set before the timeout expired, else False.
def _wait_for_any_event(events, timeout_s): def any_event_set(): return any(event.is_set() for event in events) result = timeouts.loop_until_timeout_or_true( timeout_s, any_event_set, sleep_s=_WAIT_FOR_ANY_EVENT_POLL_S) return result or any_event_set()
205,236
Save a logging.LogRecord to our test record. Logs carry useful metadata such as the logger name and level information. We capture this in a structured format in the test record to enable filtering by client applications. Args: record: A logging.LogRecord to record.
def emit(self, record): try: message = self.format(record) log_record = LogRecord( record.levelno, record.name, os.path.basename(record.pathname), record.lineno, int(record.created * 1000), message, ) self._test_record.add_log_record(log_record) self._notify_up...
205,283
Create an uploader given (parsed) JSON data. Note that this is a JSON-formatted key file downloaded from Google when the service account key is created, *NOT* a json-encoded oauth2client.client.SignedJwtAssertionCredentials object. Args: json_data: Dict containing the loaded JSON key data. ...
def from_json(cls, json_data): return cls(user=json_data['client_email'], keydata=json_data['private_key'], token_uri=json_data['token_uri'])
205,288
Block until this command has completed. Args: timeout_ms: Timeout, in milliseconds, to wait. Returns: Output of the command if it complete and self.stdout is a StringIO object or was passed in as None. Returns True if the command completed but stdout was provided (and was not a StringIO o...
def wait(self, timeout_ms=None): closed = timeouts.loop_until_timeout_or_true( timeouts.PolledTimeout.from_millis(timeout_ms), self.stream.is_closed, .1) if closed: if hasattr(self.stdout, 'getvalue'): return self.stdout.getvalue() return True return None
205,296
Initializes the configuration state. We have to pull everything we need from global scope into here because we will be swapping out the module with this instance and will lose any global references. Args: logger: Logger to use for logging messages within this class. lock: Threading.lock to...
def __init__(self, logger, lock, parser, **kwargs): self._logger = logger self._lock = lock self._modules = kwargs self._declarations = {} self.ARG_PARSER = parser # Parse just the flags we care about, since this happens at import time. self._flags, _ = parser.parse_known_args() se...
205,300
Load flag values given from command line flags. Args: flags: An argparse Namespace containing the command line flags.
def load_flag_values(self, flags=None): if flags is None: flags = self._flags for keyval in flags.config_value: k, v = keyval.split('=', 1) v = self._modules['yaml'].load(v) if isinstance(v, str) else v # Force any command line keys and values that are bytes to unicode. k = k...
205,301
Get a config value via item access. Order of precedence is: - Value provided via --config-value flag. - Value loaded via load*() methods. - Default value as declared with conf.declare() Args: item: Config key name to get.
def __getitem__(self, item): # pylint: disable=invalid-name if item not in self._declarations: raise self.UndeclaredKeyError('Configuration key not declared', item) if item in self._flag_values: if item in self._loaded_values: self._logger.warning( 'Overriding loaded value...
205,304
Declare a configuration key with the given name. Args: name: Configuration key to declare, must not have been already declared. description: If provided, use this as the description for this key. **kwargs: Other kwargs to pass to the Declaration, only default_value is currently supporte...
def declare(self, name, description=None, **kwargs): if not self._is_valid_key(name): raise self.InvalidKeyError( 'Invalid key name, must begin with a lowercase letter', name) if name in self._declarations: raise self.KeyAlreadyDeclaredError( 'Configuration key already decla...
205,306
Loads the configuration from a file. Parsed contents must be a single dict mapping config key to value. Args: yamlfile: The opened file object to load configuration from. See load_from_dict() for other args' descriptions. Raises: ConfigurationInvalidError: If configuration file can't be...
def load_from_file(self, yamlfile, _override=True, _allow_undeclared=False): self._logger.info('Loading configuration from file: %s', yamlfile) try: parsed_yaml = self._modules['yaml'].safe_load(yamlfile.read()) except self._modules['yaml'].YAMLError: self._logger.exception('Problem parsin...
205,308
Fallthrough to underlying FastbootProtocol handler. Args: attr: Attribute to get. Returns: Either the attribute from the device or a retrying function-wrapper if attr is a method on the device.
def __getattr__(self, attr): # pylint: disable=invalid-name if not self._protocol: raise usb_exceptions.HandleClosedError() val = getattr(self._protocol, attr) if callable(val): def _retry_wrapper(*args, **kwargs): result = _retry_usb_function(self._num_retries, val, *arg...
205,325
Logs the given message every n calls to a logger. Args: n: Number of calls before logging. logger: The logger to which to log. level: The logging level (e.g. logging.INFO). message: A message to log *args: Any format args for the message. Returns: A method that logs and returns True every n...
def _log_every_n_to_logger(n, logger, level, message, *args): # pylint: disable=invalid-name logger = logger or logging.getLogger() def _gen(): # pylint: disable=missing-docstring while True: for _ in range(n): yield False logger.log(level, message, *args) yield True gen = _gen(...
205,379
Loops until the specified function returns non-None or until a timeout. Args: timeout_s: The number of seconds to wait until a timeout condition is reached. As a convenience, this accepts None to mean never timeout. Can also be passed a PolledTimeout object instead of an integer. function: T...
def loop_until_timeout_or_not_none(timeout_s, function, sleep_s=1): # pylint: disable=invalid-name return loop_until_timeout_or_valid( timeout_s, function, lambda x: x is not None, sleep_s)
205,401
Executes a method forever at the specified interval. Args: method: The callable to execute. interval_s: The number of seconds to start the execution after each method finishes. Returns: An Interval object.
def execute_forever(method, interval_s): # pylint: disable=invalid-name interval = Interval(method) interval.start(interval_s) return interval
205,403
Executes a method forever until the method returns a false value. Args: method: The callable to execute. interval_s: The number of seconds to start the execution after each method finishes. Returns: An Interval object.
def execute_until_false(method, interval_s): # pylint: disable=invalid-name interval = Interval(method, stop_if_false=True) interval.start(interval_s) return interval
205,404
A function that returns whether a function call took less than time_s. NOTE: The function call is not killed and will run indefinitely if hung. Args: time_s: Maximum amount of time to take. func: Function to call. *args: Arguments to call the function with. **kwargs: Keyword arguments to call the ...
def take_at_most_n_seconds(time_s, func, *args, **kwargs): thread = threading.Thread(target=func, args=args, kwargs=kwargs) thread.start() thread.join(time_s) if thread.is_alive(): return False return True
205,409
Construct a PolledTimeout object. Args: timeout_s: This may either be a number or None. If a number, this object will consider to be expired after number seconds after construction. If None, this object never expires.
def __init__(self, timeout_s): self.start = time.time() self.timeout_s = timeout_s
205,411
Create a new PolledTimeout if needed. If timeout_ms is already a PolledTimeout, just return it, otherwise create a new PolledTimeout with the given timeout in milliseconds. Args: timeout_ms: PolledTimeout object, or number of milliseconds to use for creating a new one. Returns: A ...
def from_millis(cls, timeout_ms): if hasattr(timeout_ms, 'has_expired'): return timeout_ms if timeout_ms is None: return cls(None) return cls(timeout_ms / 1000.0)
205,412
Initializes the Interval. Args: method: A callable to execute, it should take no arguments. stop_if_false: If True, the interval will exit if the method returns False.
def __init__(self, method, stop_if_false=False): self.method = method self.stopped = threading.Event() self.thread = None self.stop_if_false = stop_if_false
205,413
Starts executing the method at the specified interval. Args: interval_s: The amount of time between executions of the method. Returns: False if the interval was already running.
def start(self, interval_s): if self.running: return False self.stopped.clear() def _execute(): # Always execute immediately once if not self.method() and self.stop_if_false: return while not self.stopped.wait(interval_s): if not self.method() and self.stop_if_...
205,414
Stops the interval. If a timeout is provided and stop returns False then the thread is effectively abandoned in whatever state it was in (presumably dead-locked). Args: timeout_s: The time in seconds to wait on the thread to finish. By default it's forever. Returns: False if a t...
def stop(self, timeout_s=None): self.stopped.set() if self.thread: self.thread.join(timeout_s) return not self.thread.isAlive() else: return True
205,415
Joins blocking until the interval ends or until timeout is reached. Args: timeout_s: The time in seconds to wait, defaults to forever. Returns: True if the interval is still running and we reached the timeout.
def join(self, timeout_s=None): if not self.thread: return False self.thread.join(timeout_s) return self.running
205,416
Returns an OpenHTF phase for use as a prompt-based start trigger. Args: message: The message to display to the user. timeout_s: Seconds to wait before raising a PromptUnansweredError. validator: Function used to validate or modify the serial number. cli_color: An ANSI color code, or the empty string.
def prompt_for_test_start( message='Enter a DUT ID in order to start the test.', timeout_s=60*60*24, validator=lambda sn: sn, cli_color=''): @PhaseOptions(timeout_s=timeout_s) @plugs.plug(prompts=UserInput) def trigger_phase(test, prompts): dut_id = prompts.prompt( message, text_input...
205,421
Initializes a ConsolePrompt. Args: message: A string to be presented to the user. callback: A function to be called with the response string. color: An ANSI color code, or the empty string.
def __init__(self, message, callback, color=''): super(ConsolePrompt, self).__init__() self.daemon = True self._message = message self._callback = callback self._color = color self._stop_event = threading.Event() self._answered = False
205,422
Display a prompt. Args: message: A string to be presented to the user. text_input: A boolean indicating whether the user must respond with text. cli_color: An ANSI color code, or the empty string. Raises: MultiplePromptsError: There was already an existing prompt. Returns: A...
def start_prompt(self, message, text_input=False, cli_color=''): with self._cond: if self._prompt: raise MultiplePromptsError prompt_id = uuid.uuid4().hex _LOG.debug('Displaying prompt (%s): "%s"%s', prompt_id, message, ', Expects text input.' if text_input else '') ...
205,428
Wait for the user to respond to the current prompt. Args: timeout_s: Seconds to wait before raising a PromptUnansweredError. Returns: A string response, or the empty string if text_input was False. Raises: PromptUnansweredError: Timed out waiting for the user to respond.
def wait_for_prompt(self, timeout_s=None): with self._cond: if self._prompt: if timeout_s is None: self._cond.wait(3600 * 24 * 365) else: self._cond.wait(timeout_s) if self._response is None: raise PromptUnansweredError return self._response
205,429
Respond to the prompt with the given ID. If there is no active prompt or the given ID doesn't match the active prompt, do nothing. Args: prompt_id: A string uniquely identifying the prompt. response: A string response to the given prompt. Returns: True if the prompt with the given I...
def respond(self, prompt_id, response): _LOG.debug('Responding to prompt (%s): "%s"', prompt_id, response) with self._cond: if not (self._prompt and self._prompt.id == prompt_id): return False self._response = response self.last_response = (prompt_id, response) self.remove_p...
205,430
Executes a phase or skips it, yielding PhaseExecutionOutcome instances. Args: phase: Phase to execute. Returns: The final PhaseExecutionOutcome that wraps the phase return value (or exception) of the final phase run. All intermediary results, if any, are REPEAT and handled internally. ...
def execute_phase(self, phase): repeat_count = 1 repeat_limit = phase.options.repeat_limit or sys.maxsize while not self._stopping.is_set(): is_last_repeat = repeat_count >= repeat_limit phase_execution_outcome = self._execute_phase_once(phase, is_last_repeat) if phase_execution_outc...
205,439
Stops execution of the current phase, if any. It will raise a ThreadTerminationError, which will cause the test to stop executing and terminate with an ERROR state. Args: timeout_s: int or None, timeout in seconds to wait for the phase to stop.
def stop(self, timeout_s=None): self._stopping.set() with self._current_phase_thread_lock: phase_thread = self._current_phase_thread if not phase_thread: return if phase_thread.is_alive(): phase_thread.kill() _LOG.debug('Waiting for cancelled phase to exit: %s', phase_...
205,441
Apply only the args that the phase knows. If the phase has a **kwargs-style argument, it counts as knowing all args. Args: phase: phase_descriptor.PhaseDescriptor or PhaseGroup or callable, or iterable of those, the phase or phase group (or iterable) to apply with_args to. **kwargs: argume...
def optionally_with_args(phase, **kwargs): if isinstance(phase, PhaseGroup): return phase.with_args(**kwargs) if isinstance(phase, collections.Iterable): return [optionally_with_args(p, **kwargs) for p in phase] if not isinstance(phase, phase_descriptor.PhaseDescriptor): phase = phase_descriptor.P...
205,444
Publish messages to subscribers. Args: message: The message to publish. client_filter: A filter function to call passing in each client. Only clients for whom the function returns True will have the message sent to them.
def publish(cls, message, client_filter=None): with cls._lock: for client in cls.subscribers: if (not client_filter) or client_filter(client): client.send(message)
205,456
Return a new PhaseDescriptor from the given function or instance. We want to return a new copy so that you can reuse a phase with different options, plugs, measurements, etc. Args: func: A phase function or PhaseDescriptor instance. **options: Options to update on the result. Raises: ...
def wrap_or_copy(cls, func, **options): if isinstance(func, openhtf.PhaseGroup): raise PhaseWrapError('Cannot wrap PhaseGroup <%s> as a phase.' % ( func.name or 'Unnamed')) if isinstance(func, cls): # We want to copy so that a phase can be reused with different options # or kwar...
205,469
Decorator to ensure a handle is open for certain methods. Subclasses should decorate their Read() and Write() with this rather than checking their own internal state, keeping all "is this handle open" logic in is_closed(). Args: method: A class method on a subclass of UsbHandle Raises: HandleClosed...
def requires_open_handle(method): # pylint: disable=invalid-name @functools.wraps(method) def wrapper_requiring_open_handle(self, *args, **kwargs): if self.is_closed(): raise usb_exceptions.HandleClosedError() return method(self, *args, **kwargs) return wrapper_requiring_open_handle
205,475
Construct a EtherSync object. Args: mac_addr: mac address of the Cambrionix unit for EtherSync.
def __init__(self, mac_addr): addr_info = mac_addr.lower().split(':') if len(addr_info) < 6: raise ValueError('Invalid mac address') addr_info[2] = 'EtherSync' self._addr = ''.join(addr_info[2:])
205,482
Get the device serial number Args: port_num: port number on the Cambrionix unit Return: usb device serial number
def get_usb_serial(self, port_num): port = self.port_map[str(port_num)] arg = ''.join(['DEVICE INFO,', self._addr, '.', port]) cmd = (['esuit64', '-t', arg]) info = subprocess.check_output(cmd, stderr=subprocess.STDOUT) serial = None if "SERIAL" in info: serial_info = info.split('SER...
205,483
open usb port Args: port_num: port number on the Cambrionix unit Return: usb handle
def open_usb_handle(self, port_num): serial = self.get_usb_serial(port_num) return local_usb.LibUsbHandle.open(serial_number=serial)
205,484
Print the error message to the file in the specified color. Args: msg: The error message to be printed. color: Optional colorama color string to be applied to the message. You can concatenate colorama color strings together here, but note that style strings will not be applied. file: A fi...
def error_print(msg, color=colorama.Fore.RED, file=sys.stderr): if CLI_QUIET: return file.write('{sep}{bright}{color}Error: {normal}{msg}{sep}{reset}'.format( sep=_linesep_for_file(file), bright=colorama.Style.BRIGHT, color=color, normal=colorama.Style.NORMAL, msg=msg, reset=colorama.Style.RESET_...
205,491
Create a new consumer for a queue. Parameters: queue_name(str): The queue to consume. prefetch(int): The number of messages to prefetch. timeout(int): The idle timeout in milliseconds. Returns: Consumer: A consumer that retrieves messages from Redis.
def consume(self, queue_name, prefetch=1, timeout=5000): return _RedisConsumer(self, queue_name, prefetch, timeout)
205,946
Enqueue a message. Parameters: message(Message): The message to enqueue. delay(int): The minimum amount of time, in milliseconds, to delay the message by. Must be less than 7 days. Raises: ValueError: If ``delay`` is longer than 7 days.
def enqueue(self, message, *, delay=None): queue_name = message.queue_name # Each enqueued message must have a unique id in Redis so # using the Message's id isn't safe because messages may be # retried. message = message.copy(options={ "redis_message_id": s...
205,947
Drop all the messages from a queue. Parameters: queue_name(str): The queue to flush.
def flush(self, queue_name): for name in (queue_name, dq_name(queue_name)): self.do_purge(name)
205,948
Get a result from the backend. Parameters: message(Message) block(bool): Whether or not to block until a result is set. timeout(int): The maximum amount of time, in ms, to wait for a result when block is True. Defaults to 10 seconds. Raises: ResultM...
def get_result(self, message, *, block: bool = False, timeout: int = None) -> Result: if timeout is None: timeout = DEFAULT_TIMEOUT end_time = time.monotonic() + timeout / 1000 message_key = self.build_message_key(message) attempts = 0 while True: ...
205,961
Store a result in the backend. Parameters: message(Message) result(object): Must be serializable. ttl(int): The maximum amount of time the result may be stored in the backend for.
def store_result(self, message, result: Result, ttl: int) -> None: message_key = self.build_message_key(message) return self._store(message_key, result, ttl)
205,962
Given a message, return its globally-unique key. Parameters: message(Message) Returns: str
def build_message_key(self, message) -> str: message_key = "%(namespace)s:%(queue_name)s:%(actor_name)s:%(message_id)s" % { "namespace": self.namespace, "queue_name": q_name(message.queue_name), "actor_name": message.actor_name, "message_id": message.mess...
205,963
Attempt to acquire a slot under this rate limiter. Parameters: raise_on_failure(bool): Whether or not failures should raise an exception. If this is false, the context manager will instead return a boolean value representing whether or not the rate limit slot was ...
def acquire(self, *, raise_on_failure=True): acquired = False try: acquired = self._acquire() if raise_on_failure and not acquired: raise RateLimitExceeded("rate limit exceeded for key %(key)r" % vars(self)) yield acquired finally: ...
205,966
Compute an exponential backoff value based on some number of attempts. Parameters: attempts(int): The number of attempts there have been so far. factor(int): The number of milliseconds to multiply each backoff by. max_backoff(int): The max number of milliseconds to backoff by. max_exponent(...
def compute_backoff(attempts, *, factor=5, jitter=True, max_backoff=2000, max_exponent=32): exponent = min(attempts, max_exponent) backoff = min(factor * 2 ** exponent, max_backoff) if jitter: backoff /= 2 backoff = int(backoff + uniform(0, backoff)) return attempts + 1, backoff
206,004
The join() method of standard queues in Python doesn't support timeouts. This implements the same functionality as that method, with optional timeout support, by depending the internals of Queue. Raises: QueueJoinTimeout: When the timeout is reached. Parameters: timeout(Optional[float...
def join_queue(queue, timeout=None): with queue.all_tasks_done: while queue.unfinished_tasks: finished_in_time = queue.all_tasks_done.wait(timeout=timeout) if not finished_in_time: raise QueueJoinTimeout("timed out after %.02f seconds" % timeout)
206,005
Wait on a list of objects that can be joined with a total timeout represented by ``timeout``. Parameters: joinables(object): Objects with a join method. timeout(int): The total timeout in milliseconds.
def join_all(joinables, timeout): started, elapsed = current_millis(), 0 for ob in joinables: ob.join(timeout=timeout / 1000) elapsed = current_millis() - started timeout = max(0, timeout - elapsed)
206,006
Declare a new actor on this broker. Declaring an Actor twice replaces the first actor with the second by name. Parameters: actor(Actor): The actor being declared.
def declare_actor(self, actor): # pragma: no cover self.emit_before("declare_actor", actor) self.declare_queue(actor.queue_name) self.actors[actor.actor_name] = actor self.emit_after("declare_actor", actor)
206,014
Alias for the RabbitMQ broker that takes a connection URL as a positional argument. Parameters: url(str): A connection string. middleware(list[Middleware]): The middleware to add to this broker.
def URLRabbitmqBroker(url, *, middleware=None): warnings.warn( "Use RabbitmqBroker with the 'url' parameter instead of URLRabbitmqBroker.", DeprecationWarning, stacklevel=2, ) return RabbitmqBroker(url=url, middleware=middleware)
206,022
Create a new consumer for a queue. Parameters: queue_name(str): The queue to consume. prefetch(int): The number of messages to prefetch. timeout(int): The idle timeout in milliseconds. Returns: Consumer: A consumer that retrieves messages from RabbitMQ.
def consume(self, queue_name, prefetch=1, timeout=5000): return _RabbitmqConsumer(self.parameters, queue_name, prefetch, timeout)
206,029
Declare a queue. Has no effect if a queue with the given name already exists. Parameters: queue_name(str): The name of the new queue. Raises: ConnectionClosed: If the underlying channel or connection has been closed.
def declare_queue(self, queue_name): attempts = 1 while True: try: if queue_name not in self.queues: self.emit_before("declare_queue", queue_name) self._declare_queue(queue_name) self.queues.add(queue_name) ...
206,030
Enqueue a message. Parameters: message(Message): The message to enqueue. delay(int): The minimum amount of time, in milliseconds, to delay the message by. Raises: ConnectionClosed: If the underlying channel or connection has been closed.
def enqueue(self, message, *, delay=None): queue_name = message.queue_name properties = pika.BasicProperties( delivery_mode=2, priority=message.options.get("broker_priority"), ) if delay is not None: queue_name = dq_name(queue_name) ...
206,035
Get the number of messages in a queue. This method is only meant to be used in unit and integration tests. Parameters: queue_name(str): The queue whose message counts to get. Returns: tuple: A triple representing the number of messages in the queue, its delayed q...
def get_queue_message_counts(self, queue_name): queue_response = self._declare_queue(queue_name) dq_queue_response = self._declare_dq_queue(queue_name) xq_queue_response = self._declare_xq_queue(queue_name) return ( queue_response.method.message_count, dq...
206,036
Drop all the messages from a queue. Parameters: queue_name(str): The queue to flush.
def flush(self, queue_name): for name in (queue_name, dq_name(queue_name), xq_name(queue_name)): self.channel.queue_purge(name)
206,037
Create the barrier for the given number of parties. Parameters: parties(int): The number of parties to wait for. Returns: bool: Whether or not the new barrier was successfully created.
def create(self, parties): assert parties > 0, "parties must be a positive integer." return self.backend.add(self.key, parties, self.ttl)
206,046
Create a new consumer for a queue. Parameters: queue_name(str): The queue to consume. prefetch(int): The number of messages to prefetch. timeout(int): The idle timeout in milliseconds. Raises: QueueNotFound: If the queue hasn't been declared. Returns: ...
def consume(self, queue_name, prefetch=1, timeout=100): try: return _StubConsumer( self.queues[queue_name], self.dead_letters_by_queue[queue_name], timeout, ) except KeyError: raise QueueNotFound(queue_name)
206,074
Declare a queue. Has no effect if a queue with the given name has already been declared. Parameters: queue_name(str): The name of the new queue.
def declare_queue(self, queue_name): if queue_name not in self.queues: self.emit_before("declare_queue", queue_name) self.queues[queue_name] = Queue() self.emit_after("declare_queue", queue_name) delayed_name = dq_name(queue_name) self.queues...
206,075
Enqueue a message. Parameters: message(Message): The message to enqueue. delay(int): The minimum amount of time, in milliseconds, to delay the message by. Raises: QueueNotFound: If the queue the message is being enqueued on doesn't exist.
def enqueue(self, message, *, delay=None): queue_name = message.queue_name if delay is not None: queue_name = dq_name(queue_name) message_eta = current_millis() + delay message = message.copy( queue_name=queue_name, options={ ...
206,076
Drop all the messages from a queue. Parameters: queue_name(str): The queue to flush.
def flush(self, queue_name): for _ in iter_queue(self.queues[queue_name]): self.queues[queue_name].task_done()
206,077
Run this pipeline. Parameters: delay(int): The minimum amount of time, in milliseconds, the pipeline should be delayed by. Returns: pipeline: Itself.
def run(self, *, delay=None): self.broker.enqueue(self.messages[0], delay=delay) return self
206,102
Get the results of each job in the pipeline. Parameters: block(bool): Whether or not to block until a result is set. timeout(int): The maximum amount of time, in ms, to wait for a result when block is True. Defaults to 10 seconds. Raises: ResultMissing: When ...
def get_results(self, *, block=False, timeout=None): deadline = None if timeout: deadline = time.monotonic() + timeout / 1000 for message in self.messages: if deadline: timeout = max(0, int((deadline - time.monotonic()) * 1000)) yiel...
206,104
Run the actors in this group. Parameters: delay(int): The minimum amount of time, in milliseconds, each message in the group should be delayed by.
def run(self, *, delay=None): for child in self.children: if isinstance(child, (group, pipeline)): child.run(delay=delay) else: self.broker.enqueue(child, delay=delay) return self
206,107
Get the results of each job in the group. Parameters: block(bool): Whether or not to block until the results are stored. timeout(int): The maximum amount of time, in milliseconds, to wait for results when block is True. Defaults to 10 seconds. Raises: ...
def get_results(self, *, block=False, timeout=None): deadline = None if timeout: deadline = time.monotonic() + timeout / 1000 for child in self.children: if deadline: timeout = max(0, int((deadline - time.monotonic()) * 1000)) if isi...
206,108
Block until all the jobs in the group have finished or until the timeout expires. Parameters: timeout(int): The maximum amount of time, in ms, to wait. Defaults to 10 seconds.
def wait(self, *, timeout=None):
    """Block until all the jobs in the group have finished or until the
    timeout expires.

    Parameters:
      timeout(int): The maximum amount of time, in ms, to wait.
        Defaults to 10 seconds.
    """
    # Exhausting the blocking results iterator is what does the
    # waiting; the individual results are discarded.
    for _ in self.get_results(block=True, timeout=timeout):  # pragma: no cover
        pass
206,109
Build a message. This method is useful if you want to compose actors. See the actor composition documentation for details. Parameters: *args(tuple): Positional arguments to send to the actor. **kwargs(dict): Keyword arguments to send to the actor. Examples: ...
def message(self, *args, **kwargs):
    """Build a message for this actor without sending it.

    Useful when composing actors (e.g. into groups or pipelines).

    Parameters:
      *args(tuple): Positional arguments to send to the actor.
      **kwargs(dict): Keyword arguments to send to the actor.

    Returns:
      Whatever message_with_options() builds for these arguments.
    """
    call_options = {"args": args, "kwargs": kwargs}
    return self.message_with_options(**call_options)
206,112
Asynchronously send a message to this actor. Parameters: *args(tuple): Positional arguments to send to the actor. **kwargs(dict): Keyword arguments to send to the actor. Returns: Message: The enqueued message.
def send(self, *args, **kwargs):
    """Asynchronously send a message to this actor.

    Parameters:
      *args(tuple): Positional arguments to send to the actor.
      **kwargs(dict): Keyword arguments to send to the actor.

    Returns:
      Message: The enqueued message.
    """
    forwarded = {"args": args, "kwargs": kwargs}
    return self.send_with_options(**forwarded)
206,114
Synchronously call this actor. Parameters: *args: Positional arguments to send to the actor. **kwargs: Keyword arguments to send to the actor. Returns: Whatever the underlying function backing this actor returns.
def __call__(self, *args, **kwargs): try: self.logger.debug("Received args=%r kwargs=%r.", args, kwargs) start = time.perf_counter() return self.fn(*args, **kwargs) finally: delta = time.perf_counter() - start self.logger.debug("Comple...
206,116
Gracefully stop the Worker and all of its consumers and workers. Parameters: timeout(int): The number of milliseconds to wait for everything to shut down.
def stop(self, timeout=600000): self.broker.emit_before("worker_shutdown", self) self.logger.info("Shutting down...") # Stop workers before consumers. The consumers are kept alive # during this process so that heartbeats keep being sent to # the broker while workers fi...
206,123
Process a message pulled off of the work queue then push it back to its associated consumer for post processing. Parameters: message(MessageProxy)
def process_message(self, message): try: self.logger.debug("Received message %s with id %r.", message, message.message_id) self.broker.emit_before("process_message", message) res = None if not message.failed: actor = self.broker.get_actor...
206,138
Create a scanner for running scanning commands synchronously. Args: network_retries: How many times SSLyze should retry a connection that timed out. network_timeout: The time until an ongoing connection times out.
def __init__( self, network_retries: int = DEFAULT_NETWORK_RETRIES, network_timeout: int = DEFAULT_NETWORK_TIMEOUT ) -> None: self._plugins_repository = PluginsRepository() # Set global network settings SslConnection.set_global_network_settings(n...
206,607
Queue a scan command targeting a specific server. Args: server_info: The server's connectivity information. The test_connectivity_to_server() method must have been called first to ensure that the server is online and accessible. scan_command: The scan command to run agai...
def queue_scan_command(self, server_info: ServerConnectivityInfo, scan_command: PluginScanCommand) -> None: # Ensure we have the right processes and queues in place for this hostname self._check_and_create_process(server_info.hostname) # Add the task to the right queue self._qu...
206,643
The WSGI Application Server. Arguments: environ {dict} -- The WSGI environ dictionary start_response {WSGI callable} Returns: WSGI Response
def app(environ, start_response): from wsgi import container container.bind('Environ', environ) try: for provider in container.make('WSGIProviders'): container.resolve(provider.boot) except Exception as e: container.make('ExceptionHandler').load_exception(e...
206,990
Show the welcome page. Arguments: view {masonite.view.View} -- The Masonite view class. Application {config.application} -- The application config module. Returns: masonite.view.View -- The Masonite view class.
def show(self, view: View, request: Request):
    """Show the welcome page.

    Arguments:
      view {masonite.view.View} -- The Masonite view class.
      request {masonite.request.Request} -- Used to reach the app
        container and resolve the Application config.

    Returns:
      masonite.view.View -- The rendered 'welcome' view.
    """
    application_config = request.app().make('Application')
    return view.render('welcome', {'app': application_config})
206,993
Compile the PyMC3 model from an abstract model specification. Args: spec (Model): A bambi Model instance containing the abstract specification of the model to compile. reset (bool): if True (default), resets the PyMC3BackEnd instance before compiling.
def build(self, spec, reset=True): if reset: self.reset() with self.model: self.mu = 0. for t in spec.terms.values(): data = t.data label = t.name dist_name = t.prior.name dist_args = t.prior...
207,213
Compute autocorrelation using FFT for every lag for the input array https://en.wikipedia.org/wiki/Autocorrelation#Efficient_computation. Args: x (array-like): An array containing MCMC samples. Returns: np.ndarray: An array of the same size as the input array.
def autocorr(x):
    """Compute the autocorrelation of *x* at every lag using FFT.

    https://en.wikipedia.org/wiki/Autocorrelation#Efficient_computation

    Args:
      x (array-like): An array containing MCMC samples.

    Returns:
      np.ndarray: An array of the same size as the input, normalized so
      the lag-0 value is 1.
    """
    centered = x - x.mean()
    n = len(centered)
    # Full linear correlation of the centered series with itself; the
    # second half of the result holds the non-negative lags.
    full = fftconvolve(centered, centered[::-1])
    lags = full[len(full) // 2:]
    # Divide each lag by the number of terms that contributed to it,
    # then normalize by the lag-0 value.
    lags = lags / np.arange(n, 0, -1)
    return lags / lags[0]
207,234
Compute autocovariance estimates for every lag for the input array. Args: x (array-like): An array containing MCMC samples. Returns: np.ndarray: An array of the same size as the input array.
def autocov(x):
    """Compute autocovariance estimates for every lag for the input array.

    Args:
      x (array-like): An array containing MCMC samples.

    Returns:
      np.ndarray: An array of the same size as the input array.
    """
    n = len(x)
    # np.var(..., ddof=1) is the unbiased variance; rescaling by
    # (n - 1) / n turns it into the biased (divide-by-n) estimate.
    biased_var = np.var(x, ddof=1) * (n - 1) / n
    return autocorr(x) * biased_var
207,235
Set up the model for sampling/fitting. Performs any steps that require access to all model terms (e.g., scaling priors on each term), then calls the BackEnd's build() method. Args: backend (str): The name of the backend to use for model fitting. Currently, 'pymc' an...
def build(self, backend=None): # retain only the complete cases n_total = len(self.data.index) if len(self.completes): completes = [set(x) for x in sum(self.completes, [])] completes = set.intersection(*completes) else: completes = [x for x i...
207,242
Update the model arguments with additional arguments. Args: kwargs (dict): Optional keyword arguments to add to prior args.
def update(self, **kwargs):
    """Update the model arguments with additional arguments.

    Args:
      kwargs (dict): Optional keyword arguments to add to prior args.
    """
    coerced = {}
    for key, value in kwargs.items():
        # Backends expect numpy arrays, so wrap plain numeric scalars.
        if isinstance(value, (int, float)):
            value = np.array(value)
        coerced[key] = value
    self.args.update(coerced)
207,259
Compile the Stan model from an abstract model specification. Args: spec (Model): A bambi Model instance containing the abstract specification of the model to compile. reset (bool): if True (default), resets the StanBackEnd instance before compiling.
def build(self, spec, reset=True): if reset: self.reset() n_cases = len(spec.y.data) self.data.append('int<lower=1> N;') self.X['N'] = n_cases def _sanitize_name(name): if name in self._original_names: return name ...
207,271
Run the Stan sampler. Args: samples (int): Number of samples to obtain (in each chain). chains (int): Number of chains to use. kwargs (dict): Optional keyword arguments passed onto the PyStan StanModel.sampling() call. Returns: A PyMC3ModelResults inst...
def run(self, samples=1000, chains=1, **kwargs):
    """Run the Stan sampler.

    Args:
      samples (int): Number of samples to obtain (in each chain).
      chains (int): Number of chains to use.
      kwargs (dict): Optional keyword arguments passed onto the PyStan
        StanModel.sampling() call.

    Returns:
      The converted results object from _convert_to_results().
    """
    # PyStan names the per-chain draw count "iter".
    self.fit = self.stan_model.sampling(
        data=self.X, iter=samples, chains=chains, **kwargs)
    return self._convert_to_results()
207,272
Run a parametrized generator Args: cache_root (str): The directory where to store the generated cores Returns: list: Cores created by the generator
def generate(self, cache_root): generator_cwd = os.path.join(cache_root, 'generated', self.vlnv.sanitized_name) generator_input_file = os.path.join(generator_cwd, self.name+'_input.yml') logger.info('Generating ' + str(self.vlnv)) if not os.path.exists(generator_cwd): ...
209,821
Creates a new object instance and adds the private finalizer attributes to it. Returns: new object instance Arguments: * *args, **kwargs -- ignored
def __new__(cls, *args, **kwargs):
    """Allocate a new instance with the finalizer guard flag cleared.

    Positional and keyword arguments are accepted (so subclasses with
    arbitrary __init__ signatures still construct) but are ignored here.

    Returns:
      The new object instance.
    """
    obj = super(_AutoFinalizedObjectBase, cls).__new__(cls)
    # Tracks whether the finalizer has already run for this instance.
    obj._finalize_called = False
    return obj
210,023
Loads a library. Catches and logs exceptions. Returns: the loaded library or None arguments: * lib -- path to/name of the library to be loaded * name -- the library's identifier (for logging) Defaults to None. * lib_cls -- library class. Defaults to None (-> cty...
def load_library(lib, name=None, lib_cls=None): try: if lib_cls: return lib_cls(lib) else: return ctypes.CDLL(lib) except Exception: if name: lib_msg = '%s (%s)' % (name, lib) else: lib_msg = lib lib_msg += ' could not...
210,099
r"""Perform a bulk write request to the endpoint specified. Arguments: endpoint: endpoint number. buffer: sequence data buffer to write. This parameter can be any sequence type. timeout: operation timeout in milliseconds. (default: 100...
def bulkWrite(self, endpoint, buffer, timeout=100):
    r"""Perform a bulk write request to the endpoint specified.

    Arguments:
      endpoint: endpoint number.
      buffer: sequence data buffer to write. This parameter can be any
        sequence type.
      timeout: operation timeout in milliseconds. (default: 100)

    Returns whatever the underlying device's write() reports
    (presumably the number of bytes written -- confirm against pyusb).
    """
    # Thin legacy-API shim over the modern pyusb device object.
    return self.dev.write(endpoint, buffer, timeout)
210,135
r"""Performs a bulk read request to the endpoint specified. Arguments: endpoint: endpoint number. size: number of bytes to read. timeout: operation timeout in milliseconds. (default: 100) Returns a tuple with the data read.
def bulkRead(self, endpoint, size, timeout=100):
    r"""Perform a bulk read request to the endpoint specified.

    Arguments:
      endpoint: endpoint number.
      size: number of bytes to read.
      timeout: operation timeout in milliseconds. (default: 100)

    Returns the data read, in whatever form the underlying device's
    read() produces.
    """
    # Thin legacy-API shim over the modern pyusb device object.
    return self.dev.read(endpoint, size, timeout)
210,136
r"""Perform an interrupt write request to the endpoint specified. Arguments: endpoint: endpoint number. buffer: sequence data buffer to write. This parameter can be any sequence type. timeout: operation timeout in milliseconds. (default...
def interruptWrite(self, endpoint, buffer, timeout=100):
    r"""Perform an interrupt write request to the endpoint specified.

    Arguments:
      endpoint: endpoint number.
      buffer: sequence data buffer to write. This parameter can be any
        sequence type.
      timeout: operation timeout in milliseconds. (default: 100)

    Returns whatever the underlying device's write() reports.
    """
    # Thin legacy-API shim over the modern pyusb device object.
    return self.dev.write(endpoint, buffer, timeout)
210,137
r"""Performs an interrupt read request to the endpoint specified. Arguments: endpoint: endpoint number. size: number of bytes to read. timeout: operation timeout in milliseconds. (default: 100) Returns a tuple with the data read.
def interruptRead(self, endpoint, size, timeout=100):
    r"""Perform an interrupt read request to the endpoint specified.

    Arguments:
      endpoint: endpoint number.
      size: number of bytes to read.
      timeout: operation timeout in milliseconds. (default: 100)

    Returns the data read, in whatever form the underlying device's
    read() produces.
    """
    # Thin legacy-API shim over the modern pyusb device object.
    return self.dev.read(endpoint, size, timeout)
210,138
r"""Claims the interface with the Operating System. Arguments: interface: interface number or an Interface object.
def claimInterface(self, interface):
    r"""Claim the interface with the operating system.

    Arguments:
      interface: interface number or an Interface object.
    """
    number = interface.interfaceNumber if isinstance(interface, Interface) else interface
    util.claim_interface(self.dev, number)
    # Remembered so later calls (e.g. setAltInterface) know which
    # interface is currently claimed.
    self.__claimed_interface = number
210,140
r"""Set the active configuration of a device. Arguments: configuration: a configuration value or a Configuration object.
def setConfiguration(self, configuration):
    r"""Set the active configuration of the device.

    Arguments:
      configuration: a configuration value or a Configuration object.
    """
    value = configuration.value if isinstance(configuration, Configuration) else configuration
    self.dev.set_configuration(value)
210,142
r"""Sets the active alternate setting of the current interface. Arguments: alternate: an alternate setting number or an Interface object.
def setAltInterface(self, alternate):
    r"""Set the active alternate setting of the currently claimed interface.

    Arguments:
      alternate: an alternate setting number or an Interface object.
    """
    setting = alternate.alternateSetting if isinstance(alternate, Interface) else alternate
    self.dev.set_interface_altsetting(self.__claimed_interface, setting)
210,143
r"""Retrieve the string descriptor specified by index and langid from a device. Arguments: index: index of descriptor in the device. length: number of bytes of the string (ignored) langid: Language ID. If it is omitted, the first language will...
def getString(self, index, length, langid=None):
    r"""Retrieve the string descriptor specified by index and langid.

    Arguments:
      index: index of the descriptor in the device.
      length: number of bytes of the string (ignored by this
        implementation).
      langid: Language ID. If omitted, the first language will be used.

    Returns the string encoded as ASCII bytes.
    """
    text = util.get_string(self.dev, index, langid)
    return text.encode('ascii')
210,144
r"""Retrieves a descriptor from the device identified by the type and index of the descriptor. Arguments: desc_type: descriptor type. desc_index: index of the descriptor. len: descriptor length. endpoint: ignored.
def getDescriptor(self, desc_type, desc_index, length, endpoint=-1):
    r"""Retrieve a descriptor identified by its type and index.

    Arguments:
      desc_type: descriptor type.
      desc_index: index of the descriptor.
      length: descriptor length.
      endpoint: ignored (kept for legacy API compatibility).
    """
    return control.get_descriptor(self.dev, length, desc_type, desc_index)
210,145
r"""Detach a kernel driver from the interface (if one is attached, we have permission and the operation is supported by the OS) Arguments: interface: interface number or an Interface object.
def detachKernelDriver(self, interface):
    r"""Detach a kernel driver from the interface (if one is attached,
    we have permission, and the operation is supported by the OS).

    Arguments:
      interface: interface number or an Interface object.
    """
    number = interface.interfaceNumber if isinstance(interface, Interface) else interface
    self.dev.detach_kernel_driver(number)
210,146