code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def cells(self):
    """The total number of cells in the MOC, summed over every order."""
    return sum(len(order_cells) for _order, order_cells in self)
The number of cells in the MOC. This gives the total number of cells at all orders, with cells from every order counted equally. >>> m = MOC(0, (1, 2)) >>> m.cells 2
def _pull_content_revision_parent(self):
    """Fetch and cache page content, revision id and parent id with one
    MediaWiki request; subsequent calls return the cached triple."""
    if self._revision_id is None:
        params = {
            "prop": "extracts|revisions",
            "explaintext": "",
            "rvprop": "ids",
        }
        params.update(self.__title_query_param())
        reply = self.mediawiki.wiki_request(params)
        info = reply["query"]["pages"][self.pageid]
        revision = info["revisions"][0]
        self._content = info["extract"]
        self._revision_id = revision["revid"]
        self._parent_id = revision["parentid"]
    return self._content, self._revision_id, self._parent_id
combine the pulling of these three properties
def getBottomLeft(self):
    """Return the bottom-left coordinate of the line as an (x, y) tuple.

    The bottom-left corner is the smaller of the two x values paired with
    the smaller of the two y values, which replaces the original nested
    conditionals with two ``min`` calls.
    """
    x1 = float(self.get_x1())
    x2 = float(self.get_x2())
    y1 = float(self.get_y1())
    y2 = float(self.get_y2())
    return (min(x1, x2), min(y1, y2))
Retrieve the bottom-left coordinate of the line as a tuple. Coordinates must be numbers.
def getExtensions(self, extname='SCI', section=None):
    """Return the list of EXTVER values for extensions named *extname*.

    :param extname: EXTNAME header value to match (default ``'SCI'``).
    :param section: if given, it is returned as-is (wrapped in a list when
        it is not already one); otherwise every matching extension's EXTVER
        is collected from ``self._image``.
    :returns: list of EXTVER values.
    """
    if section is None:
        # Collect EXTVER from every HDU whose EXTNAME matches.
        return [hdu.header['extver'] for hdu in self._image
                if 'extname' in hdu.header and hdu.header['extname'] == extname]
    if not isinstance(section, list):
        section = [section]
    return section
Return the list of EXTVER values for extensions with name specified in extname.
def simple_swap(ins: Instruction) -> Instruction:
    """Replace one instruction with another based on the 'simple_swap'
    transform rule in the bytecode definitions (e.g. ``aload_0`` becomes
    ``aload 0``).

    :param ins: Instruction to potentially modify.
    :return: Potentially modified instruction; the original if no rule applies.
    """
    try:
        rule = ins.details['transform']['simple_swap']
    except KeyError:
        # No transform rule for this instruction; return it unchanged.
        return ins
    replacement_ins = opcode_table[rule['op']]
    return Instruction(
        replacement_ins['mnemonic'],
        replacement_ins['op'],
        # Pair each rule operand value with the replacement's operand type.
        [Operand(
            replacement_ins['operands'][i][1],
            r
        ) for i, r in enumerate(rule['operands'])],
        ins.pos
    )
Replaces one instruction with another based on the transform rules in the bytecode definitions. This can help simplify your code as it reduces the overall number of instructions. For example, `aload_0` will become `aload 0`. :param ins: Instruction to potentially modify. :return: Potentially modified instruction.
def get_jvm_options(self):
    """Return the options to run this JVM with (e.g. -Dfoo=bar, -Xmx1g).

    Thus named because ``get_options()`` already exists and returns this
    object's Pants options.
    """
    jvm_args = []
    for raw_opt in self.get_options().options:
        jvm_args.extend(safe_shlex_split(raw_opt))
    debug_requested = (self.get_options().debug
                       or self.get_options().is_flagged('debug_port')
                       or self.get_options().is_flagged('debug_args'))
    if debug_requested:
        debug_port = self.get_options().debug_port
        jvm_args.extend(arg.format(debug_port=debug_port)
                        for arg in self.get_options().debug_args)
    return jvm_args
Return the options to run this JVM with. These are options to the JVM itself, such as -Dfoo=bar, -Xmx=1g, -XX:-UseParallelGC and so on. Thus named because get_options() already exists (and returns this object's Pants options).
def find_experiment_export(app_id):
    """Attempt to find a zipped export of the experiment *app_id* and return
    its path, or None if not found.

    Search order:
      1. local "data" subdirectory
      2. user S3 bucket
      3. Dallinger S3 bucket
    """
    cwd = os.getcwd()
    data_filename = "{}-data.zip".format(app_id)
    path_to_data = os.path.join(cwd, "data", data_filename)
    if os.path.exists(path_to_data):
        try:
            # Validate the local file is readable before returning it.
            Data(path_to_data)
        except IOError:
            from dallinger import logger
            logger.exception(
                "Error reading local data file {}, checking remote.".format(
                    path_to_data
                )
            )
        else:
            return path_to_data
    # Fall back to downloading into a fresh temp directory.
    path_to_data = os.path.join(tempfile.mkdtemp(), data_filename)
    buckets = [user_s3_bucket(), dallinger_s3_bucket()]
    for bucket in buckets:
        try:
            bucket.download_file(data_filename, path_to_data)
        except botocore.exceptions.ClientError:
            # Not in this bucket; try the next one.
            pass
        else:
            return path_to_data
Attempt to find a zipped export of an experiment with the ID provided and return its path. Returns None if not found. Search order: 1. local "data" subdirectory 2. user S3 bucket 3. Dallinger S3 bucket
def isidentifier(s, dotted=False):
    """A function equivalent to the str.isidentifier method on Py3.

    :param s: candidate string.
    :param dotted: when True, every dot-separated component of *s* must
        itself be an identifier (e.g. ``"os.path"``).
    """
    if dotted:
        return all(isidentifier(a) for a in s.split('.'))
    if PY3:
        return s.isidentifier()
    import re
    # Compile the Py2 fallback pattern once and cache it on the function
    # instead of recompiling on every call.
    pattern = getattr(isidentifier, '_name_re', None)
    if pattern is None:
        pattern = isidentifier._name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
    return bool(pattern.match(s))
A function equivalent to the str.isidentifier method on Py3
def minWidth(self):
    """Attempt to determine a minimum sensible width for this paragraph,
    i.e. the width of its widest single word."""
    frags = self.frags
    nFrags = len(frags)
    if not nFrags:
        return 0
    if nFrags == 1:
        f = frags[0]
        fS = f.fontSize
        fN = f.fontName
        # Plain-text frag: split into words; otherwise use precomputed words.
        words = hasattr(f, 'text') and split(f.text, ' ') or f.words
        # Measure each word in the frag's font/size.
        func = lambda w, fS=fS, fN=fN: stringWidth(w, fN, fS)
    else:
        # Multi-frag: helper returns (width, ...) tuples; take the width.
        words = _getFragWords(frags)
        func = lambda x: x[0]
    return max(map(func, words))
Attempt to determine a minimum sensible width
def nested_update(d, u):
    """Merge nested dict *u* into *d* in place and return *d*.

    Values that are mappings are merged recursively; all other values
    overwrite the corresponding entry in ``d``.

    Args:
        d: dict updated in place; may or may not contain nested dicts.
        u: dict whose contents are merged into ``d``.
    """
    # ``collections.Mapping`` was removed in Python 3.10; the ABC lives in
    # ``collections.abc``.
    from collections.abc import Mapping
    for k, v in list(u.items()):
        if isinstance(v, Mapping):
            d[k] = nested_update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
Merge two nested dicts. Nested dicts are sometimes used for representing various recursive structures. When updating such a structure, it may be convenient to present the updated data as a corresponding recursive structure. This function will then apply the update. Args: d: dict dict that will be updated in-place. May or may not contain nested dicts. u: dict dict with contents that will be merged into ``d``. May or may not contain nested dicts.
def scope(self, key):
    """Apply the name scope to a key.

    Parameters
    ----------
    key : string

    Returns
    -------
    ``name/key`` if ``name`` is not ``None``; otherwise ``key``.
    """
    prefix = self.name
    return key if prefix is None else '{:s}/{:s}'.format(prefix, key)
Apply the name scope to a key Parameters ---------- key : string Returns ------- `name/key` if `name` is not `None`; otherwise, `key`.
def props_to_image(regionprops, shape, prop):
    r"""Create an image of the given ``shape`` with each region colored
    according to the specified ``prop`` from ``regionprops``.

    Parameters
    ----------
    regionprops : list
        Region property objects (e.g. from skimage regionprops).
    shape : array_like
        Shape of the original image.
    prop : string
        Region property used to color each region.

    Returns
    -------
    image : ND-array
        Image with each region filled with its property value.
    """
    im = sp.zeros(shape=shape)
    for r in regionprops:
        # 'convex' uses the convex hull mask instead of the raw region mask.
        if prop == 'convex':
            mask = r.convex_image
        else:
            mask = r.image
        temp = mask * r[prop]
        # Paste the sub-image back at the region's bounding box.
        s = bbox_to_slices(r.bbox)
        im[s] += temp
    return im
r""" Creates an image with each region colored according the specified ``prop``, as obtained by ``regionprops_3d``. Parameters ---------- regionprops : list This is a list of properties for each region that is computed by PoreSpy's ``regionprops_3D`` or Skimage's ``regionsprops``. shape : array_like The shape of the original image for which ``regionprops`` was obtained. prop : string The region property of interest. Can be a scalar item such as 'volume' in which case the the regions will be colored by their respective volumes, or can be an image-type property such as 'border' or 'convex_image', which will return an image composed of the sub-images. Returns ------- image : ND-array An ND-image the same size as the original image, with each region represented by the values specified in ``prop``. See Also -------- props_to_DataFrame regionprops_3d
def href(self):
    """Return the URL of the resource, pluralized for non-root collections.

    :rtype: str
    """
    resource_url = self.session.base_url + str(self.path)
    if self.path.is_collection and not self.path.is_root:
        resource_url += 's'
    return resource_url
Return URL of the resource :rtype: str
def mean(self, only_valid=True) -> ErrorValue:
    """Calculate the mean of the pixels, ignoring masked ones when
    *only_valid* is True."""
    if only_valid:
        intensity = self.intensity[self.mask]
        error = self.error[self.mask]
    else:
        intensity = self.intensity
        error = self.error
    # Propagate uncertainty as the RMS of the per-pixel errors.
    return ErrorValue(intensity.mean(), (error ** 2).mean() ** 0.5)
Calculate the mean of the pixels, not counting the masked ones if only_valid is True.
def Recv(self):
    """Accept one length-prefixed message from Fleetspeak.

    Returns:
      A tuple (common_pb2.Message, size of the message in bytes).

    Raises:
      ProtocolError: If the advertised size exceeds MAX_SIZE.
    """
    # NOTE(review): the size prefix is read outside _read_lock while the
    # payload is read inside it -- confirm callers serialize Recv() calls.
    size = struct.unpack(_STRUCT_FMT, self._ReadN(_STRUCT_LEN))[0]
    if size > MAX_SIZE:
        raise ProtocolError("Expected size to be at most %d, got %d" % (MAX_SIZE, size))
    with self._read_lock:
        buf = self._ReadN(size)
        # Trailing magic validates framing after the payload.
        self._ReadMagic()
    res = common_pb2.Message()
    res.ParseFromString(buf)
    return res, len(buf)
Accept a message from Fleetspeak. Returns: A tuple (common_pb2.Message, size of the message in bytes). Raises: ProtocolError: If we receive unexpected data from Fleetspeak.
def is_field_visible(self, field):
    """Check if the field is visible: hide the Client field on Client
    objects and the Batch field on Batch objects."""
    hidden_on = {"Client": ("Client",), "Batch": ("Batch",)}
    portal_types = hidden_on.get(field.getName())
    if portal_types and self.context.portal_type in portal_types:
        return False
    return True
Check if the field is visible
def qteIsMiniApplet(self, obj):
    """Test if instance ``obj`` is a mini applet.

    |Args|

    * ``obj`` (**object**): object to test.

    |Returns|

    * **bool**: whether or not ``obj`` is the mini applet.
    """
    # Objects without the admin attribute (or flag) are not mini applets.
    admin = getattr(obj, '_qteAdmin', None)
    return getattr(admin, 'isMiniApplet', False)
Test if instance ``obj`` is a mini applet. |Args| * ``obj`` (**object**): object to test. |Returns| * **bool**: whether or not ``obj`` is the mini applet. |Raises| * **None**
async def try_sending(self, msg, timeout_secs, max_attempts):
    """Send *msg* to the device, retrying until acknowledged.

    Tries up to *max_attempts* times, waiting *timeout_secs* for each
    response. If no answer ever arrives the device is considered gone:
    its callback is invoked with None and the device is unregistered.

    :param msg: The message to send.
    :param timeout_secs: Seconds to wait per attempt (default self.timeout).
    :param max_attempts: Maximum send attempts (default self.retry_count).
    """
    if timeout_secs is None:
        timeout_secs = self.timeout
    if max_attempts is None:
        max_attempts = self.retry_count
    attempts = 0
    while attempts < max_attempts:
        # Message was already answered/cancelled elsewhere; nothing to do.
        if msg.seq_num not in self.message:
            return
        event = aio.Event()
        self.message[msg.seq_num][1] = event
        attempts += 1
        if self.transport:
            self.transport.sendto(msg.packed_message)
        try:
            # Wait for the ack handler to set the event.
            myresult = await aio.wait_for(event.wait(), timeout_secs)
            break
        except Exception as inst:
            # Timed out (or failed); give up only after the last attempt.
            if attempts >= max_attempts:
                if msg.seq_num in self.message:
                    callb = self.message[msg.seq_num][2]
                    if callb:
                        callb(self, None)
                    del (self.message[msg.seq_num])
                # Device no longer responds; drop it.
                self.unregister()
Coroutine used to send message to the device when a response or ack is needed. This coroutine will try to send up to max_attempts time the message, waiting timeout_secs for an answer. If no answer is received, it will consider that the device is no longer accessible and will unregister it. :param msg: The message to send :type msg: aiolifx.Message :param timeout_secs: Number of seconds to wait for a response or ack :type timeout_secs: int :param max_attempts: Maximum number of times the message is sent before giving up. :type max_attempts: int :returns: a coroutine to be scheduled :rtype: coroutine
def pull(self):
    """Pull the selected repo from its remote, cloning it first when it is
    not yet present locally; user changes are preserved by the update."""
    if os.path.exists(self.repo_dir):
        yield from self.update()
    else:
        yield from self.initialize_repo()
Pull selected repo from a remote git repository, while preserving user changes
def clean_start_time(self):
    """Validate that the new start time does not fall on or before any
    active (not yet ended) entry for this user."""
    start = self.cleaned_data.get('start_time')
    if not start:
        return start
    # Entries that are still running and started at/after the new start.
    active_entries = self.user.timepiece_entries.filter(
        start_time__gte=start, end_time__isnull=True)
    for entry in active_entries:
        output = ('The start time is on or before the current entry: '
                  '%s - %s starting at %s' % (entry.project, entry.activity,
                                              entry.start_time.strftime('%H:%M:%S')))
        raise forms.ValidationError(output)
    return start
Make sure that the start time doesn't come before the active entry
def render(file):
    """Generate the result HTML for a Jupyter notebook file.

    :param file: object whose ``open()`` returns a readable binary stream.
    :returns: tuple ``(body, resources)`` from nbconvert's HTMLExporter.
    """
    fp = file.open()
    try:
        content = fp.read()
    finally:
        # Always release the handle, even if reading fails.
        fp.close()
    notebook = nbformat.reads(content.decode('utf-8'), as_version=4)
    html_exporter = HTMLExporter()
    html_exporter.template_file = 'basic'
    (body, resources) = html_exporter.from_notebook_node(notebook)
    return body, resources
Generate the result HTML.
def blend(self, other, ratio=0.5):
    """Blend this color with another color in the same color space.

    :param Color other: The color to blend with.
    :param float ratio: Weight of *other* (0 -> 1); default 0.5.
    :raises ValueError: if the colors are in different color spaces.
    :returns: A new blended color of the same class.
    """
    if self.space != other.space:
        # ValueError is an Exception subclass, so existing handlers still work.
        raise ValueError("Colors must belong to the same color space.")
    keep = 1.0 - ratio
    mixed = tuple(u * keep + v * ratio
                  for u, v in zip(self.values, other.values))
    return self.__class__(self.space, *mixed)
Blend this color with another color in the same color space. By default, blends the colors half-and-half (ratio: 0.5). :param Color other: The color to blend. :param float ratio: How much to blend (0 -> 1). :rtype: Color :returns: A new spectra.Color
def build_wheel(source_dir, wheel_dir, config_settings=None):
    """Build a wheel from a source directory using PEP 517 hooks.

    :param str source_dir: Source directory containing pyproject.toml.
    :param str wheel_dir: Target directory to create the wheel in.
    :param dict config_settings: Options to pass to the build backend.

    This is a blocking function: it runs pip in a subprocess to install
    the build requirements into an isolated environment.
    """
    if config_settings is None:
        config_settings = {}
    requires, backend = _load_pyproject(source_dir)
    hooks = Pep517HookCaller(source_dir, backend)
    with BuildEnvironment() as env:
        # Static requirements from pyproject.toml first...
        env.pip_install(requires)
        # ...then any dynamic requirements reported by the backend.
        reqs = hooks.get_requires_for_build_wheel(config_settings)
        env.pip_install(reqs)
        return hooks.build_wheel(wheel_dir, config_settings)
Build a wheel from a source directory using PEP 517 hooks. :param str source_dir: Source directory containing pyproject.toml :param str wheel_dir: Target directory to create wheel in :param dict config_settings: Options to pass to build backend This is a blocking function which will run pip in a subprocess to install build requirements.
def generate_docs(self):
    """Generate the output docstring by filling in each section in order."""
    # numpydoc output may override the first line of the docstring.
    if self.dst.style['out'] == 'numpydoc' and self.dst.numpydoc.first_line is not None:
        self.first_line = self.dst.numpydoc.first_line
    self._set_desc()
    self._set_params()
    self._set_return()
    self._set_raises()
    self._set_other()
    self._set_raw()
    self.generated_docs = True
Generates the output docstring
def save_cPkl(fpath, data, verbose=None, n=None):
    """Save *data* to *fpath* as a pickle (protocol 2 for Py2 compat),
    with optional verbosity.

    :param fpath: destination file path.
    :param data: picklable object to save.
    :param verbose: verbosity flag, normalized by _rectify_verb_write.
    :param n: path-tail length used only for the verbose message.
    """
    verbose = _rectify_verb_write(verbose)
    if verbose:
        print('[util_io] * save_cPkl(%r, data)' % (util_path.tail(fpath, n=n),))
    with open(fpath, 'wb') as file_:
        pickle.dump(data, file_, protocol=2)
Saves data to a pickled file with optional verbosity
def usable_id(cls, id):
    """Retrieve a numeric id from *id*, which can be a number or a string.

    Reports an error via ``cls.error`` when *id* cannot be converted.

    :returns: the integer id, or None when conversion failed.
    """
    try:
        qry_id = int(id)
    except (TypeError, ValueError):
        # int() only raises these two for bad input; anything else would be
        # a real bug that should propagate.
        qry_id = None
    if not qry_id:
        msg = 'unknown identifier %s' % id
        cls.error(msg)
    return qry_id
Retrieve id from input which can be num or id.
def raw_response(self, cursor_id=None):
    """Check the response header from the database, without decoding BSON.

    Can raise CursorNotFound, NotMasterError, ExecutionTimeout, or
    OperationFailure.

    :Parameters:
      - `cursor_id` (optional): the cursor id we sent with getMore; used
        for an informative exception when the server reports it invalid.
    """
    # Flag bit 1: CursorNotFound.
    if self.flags & 1:
        if cursor_id is None:
            raise ProtocolError("No cursor id for getMore operation")
        msg = "Cursor not found, cursor id: %d" % (cursor_id,)
        errobj = {"ok": 0, "errmsg": msg, "code": 43}
        raise CursorNotFound(msg, 43, errobj)
    # Flag bit 2: QueryFailure; the body is a single error document.
    elif self.flags & 2:
        error_object = bson.BSON(self.documents).decode()
        error_object.setdefault("ok", 0)
        if error_object["$err"].startswith("not master"):
            raise NotMasterError(error_object["$err"], error_object)
        elif error_object.get("code") == 50:
            raise ExecutionTimeout(error_object.get("$err"),
                                   error_object.get("code"),
                                   error_object)
        raise OperationFailure("database error: %s" % error_object.get("$err"),
                               error_object.get("code"),
                               error_object)
    return [self.documents]
Check the response header from the database, without decoding BSON. Check the response for errors and unpack. Can raise CursorNotFound, NotMasterError, ExecutionTimeout, or OperationFailure. :Parameters: - `cursor_id` (optional): cursor_id we sent to get this response - used for raising an informative exception when we get cursor id not valid at server response.
def list_feeds(self):
    """Return the names of all feeds (the sections of the data file)."""
    parser = configparser.ConfigParser()
    parser.read(self.data_filename)
    return parser.sections()
Output a list of all feed names
def link_files(files: set, workspace_src_dir: str, common_parent: str, conf):
    """Hard-link every path in *files* under *workspace_src_dir*.

    All paths in *files* and *workspace_src_dir* are relative to
    ``conf.project_root``. If *common_parent* is given, it must be a common
    parent of all *files* and is truncated from the destination paths.

    :raises ValueError: if *common_parent* is not a common parent of all files.
    :returns: number of entries linked.
    """
    norm_dir = normpath(workspace_src_dir)  # NOTE(review): unused -- confirm intent
    base_dir = ''
    if common_parent:
        common_parent = normpath(common_parent)
        base_dir = commonpath(list(files) + [common_parent])
        if base_dir != common_parent:
            raise ValueError('{} is not the common parent of all target '
                             'sources and data'.format(common_parent))
    logger.debug(
        'Rebasing files in image relative to common parent dir {}',
        base_dir)
    num_linked = 0
    for src in files:
        abs_src = join(conf.project_root, src)
        # Destination path with the common-parent prefix stripped.
        abs_dest = join(conf.project_root, workspace_src_dir,
                        relpath(src, base_dir))
        link_node(abs_src, abs_dest, conf.builders_workspace_dir in src)
        num_linked += 1
    return num_linked
Sync the list of files and directories in `files` to destination directory specified by `workspace_src_dir`. "Sync" in the sense that every file given in `files` will be hard-linked under `workspace_src_dir` after this function returns, and no other files will exist under `workspace_src_dir`. For directories in `files`, hard-links of contained files are created recursively. All paths in `files`, and the `workspace_src_dir`, must be relative to `conf.project_root`. If `common_parent` is given, and it is a common parent directory of all `files`, then the `commonm_parent` part is truncated from the sync'ed files destination path under `workspace_src_dir`. :raises FileNotFoundError: If `files` contains files or directories that do not exist. :raises ValueError: If `common_parent` is given (not `None`), but is *NOT* a common parent of all `files`.
def _is_flag_group(obj):
    """Return True if *obj* is an h5py.Group containing 'active' and
    'known' datasets, i.e. it looks like a flag group."""
    if not isinstance(obj, h5py.Group):
        return False
    active = obj.get("active")
    known = obj.get("known")
    return isinstance(active, h5py.Dataset) and isinstance(known, h5py.Dataset)
Returns `True` if `obj` is an `h5py.Group` that looks like it contains a flag
def fetch_new_id(self, ):
    """Return a new id for this reftrack to be set on the refobject.

    The id distinguishes reftracks that share the same parent, type and
    element: it is one greater than the highest existing id among them.

    :returns: A new id
    :rtype: int
    :raises: None
    """
    parent = self.get_parent()
    if parent:
        others = parent._children
    else:
        # Top-level reftracks: siblings are the parentless roots.
        others = [r for r in self.get_root()._reftracks if r.get_parent() is None]
    # Only siblings with the same type and element compete for ids.
    others = [r for r in others if r != self and r.get_typ() == self.get_typ() and r.get_element() == self.get_element()]
    highest = -1
    for r in others:
        identifier = r.get_id()
        if identifier > highest:
            highest = identifier
    return highest + 1
Return a new id for the given reftrack to be set on the refobject The id can identify reftracks that share the same parent, type and element. :returns: A new id :rtype: int :raises: None
def _finish_transaction_with_retry(self, command_name, explict_retry):
    """Run commit or abort with one retry after any retryable error.

    :Parameters:
      - `command_name`: Either "commitTransaction" or "abortTransaction".
      - `explict_retry`: True when this is an explicit commit retry attempt,
        i.e. the application called session.commit_transaction() twice.
    """
    try:
        return self._finish_transaction(command_name, explict_retry)
    except ServerSelectionTimeoutError:
        # No server available: retrying cannot help.
        raise
    except ConnectionFailure as exc:
        try:
            return self._finish_transaction(command_name, True)
        except ServerSelectionTimeoutError:
            # Surface the original failure, not the retry's timeout.
            raise exc
    except OperationFailure as exc:
        if exc.code not in _RETRYABLE_ERROR_CODES:
            raise
        try:
            return self._finish_transaction(command_name, True)
        except ServerSelectionTimeoutError:
            raise exc
Run commit or abort with one retry after any retryable error. :Parameters: - `command_name`: Either "commitTransaction" or "abortTransaction". - `explict_retry`: True when this is an explict commit retry attempt, ie the application called session.commit_transaction() twice.
def host_report_msg(hostname, module_name, result, oneline):
    """Summarize the JSON results for a particular host."""
    failed = utils.is_failed(result)
    status = 'FAILED' if failed else 'success'
    # Raw command modules get command-style formatting unless the result
    # was parsed or is an async job.
    raw_command = (module_name in ('command', 'shell', 'raw')
                   and 'ansible_job_id' not in result
                   and result.get('parsed', True) != False)
    if raw_command:
        return command_generic_msg(hostname, result, oneline, status)
    return regular_generic_msg(hostname, result, oneline, status)
summarize the JSON results for a particular host
def get_values(feature, properties):
    """Return all values of the given feature found in the property set."""
    if feature[0] != '<':
        feature = '<' + feature + '>'
    return [replace_grist(p, '') for p in properties
            if get_grist(p) == feature]
Returns all values of the given feature specified by the given property set.
def get_all_queues(self):
    """Get information about all queues in the cluster.

    Returns
    -------
    queues : list of Queue
    """
    resp = self._call('getAllQueues', proto.Empty())
    return [Queue.from_protobuf(q) for q in resp.queues]
Get information about all queues in the cluster. Returns ------- queues : list of Queue Examples -------- >>> client.get_all_queues() [Queue<name='default', percent_used=0.00>, Queue<name='myqueue', percent_used=5.00>, Queue<name='child1', percent_used=10.00>, Queue<name='child2', percent_used=0.00>]
def sort2groups(array, gpat=None):
    """Sort an array of strings into groups by regex patterns.

    :param array: iterable of strings to classify.
    :param gpat: list of patterns (default ``['_R1', '_R2']``).
    :returns: ``([sorted group lists], sorted unmatched)``.
    """
    if gpat is None:
        gpat = ['_R1', '_R2']  # avoid a shared mutable default argument
    groups = [REGroup(gp) for gp in gpat]
    unmatched = []
    for item in array:
        # First matching group claims the item; otherwise it is unmatched.
        for m in groups:
            if m.match(item):
                break
        else:
            unmatched.append(item)
    return [sorted(m.list) for m in groups], sorted(unmatched)
Sort an array of strings to groups by patterns
def maybe_start_recording(tokens, index):
    """Return a new _CommentedLineRecorder when it is time to record,
    otherwise None."""
    if not _is_really_comment(tokens, index):
        return None
    return _CommentedLineRecorder(index, tokens[index].line)
Return a new _CommentedLineRecorder when it is time to record.
def create(self, edgeList=None, excludeEdges=None, networkName=None, nodeList=None, source=None, verbose=False):
    """Create a new network from a list of nodes and edges in an existing
    source network. The SUID of the network and view are returned.

    :param edgeList (string, optional): edges to include; supports the
        keywords all/selected/unselected and COLUMN:VALUE patterns.
    :param excludeEdges (string, optional): unless true, edges connecting
        nodes in nodeList are implicitly included.
    :param networkName (string, optional): name for the new network.
    :param nodeList (string, optional): nodes to include; same syntax as
        edgeList.
    :param source (string, optional): source network by name or SUID:
        prefix; blank or CURRENT means the current network.
    :param verbose: print more.
    :returns: { network, view }
    """
    network = check_network(self, source, verbose=verbose)
    PARAMS = set_param(["edgeList", "excludeEdges", "networkName", "nodeList", "source"],
                       [edgeList, excludeEdges, networkName, nodeList, network])
    response = api(url=self.__url + "/create", PARAMS=PARAMS, method="POST", verbose=verbose)
    return response
Create a new network from a list of nodes and edges in an existing source network. The SUID of the network and view are returned. :param edgeList (string, optional): Specifies a list of edges. The keywords all, selected, or unselected can be used to specify edges by their selection state. The pattern COLUMN:VALUE sets this parameter to any rows that contain the specified column value; if the COLUMN prefix is not used, the NAME column is matched by default. A list of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple values. :param excludeEdges (string, optional): Unless this is set to true, edges that connect nodes in the nodeList are implicitly included :param networkName (string, optional): :param nodeList (string, optional): Specifies a list of nodes. The keywords all, selected, or unselected can be used to specify nodes by their selection state. The pattern COLUMN:VALUE sets this parameter to any rows that contain the specified column value; if the COLUMN prefix is not used, the NAME column is matched by default. A list of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple values. :param source (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value can also be used to specify the current network. :param verbose: print more :returns: { netowrk, view }
def start_single(self, typ, scol):
    """Begin and register a new Single of the given type, indented
    relative to this group's level by start column *scol*."""
    self.starting_single = True
    new_single = Single(typ=typ, group=self, indent=(scol - self.level))
    self.single = new_single
    self.singles.append(new_single)
    return new_single
Start a new single
def log_value(self, name, value, step=None):
    """Log a new scalar *value* for *name* at *step*.

    Args:
        name (str): name of the variable (converted to a valid tensorflow
            summary name).
        value (float): real number to be logged as a scalar.
        step (int): non-negative step for visualization; do not log
            different values of the same variable on the same step.
    """
    # Strings are rejected explicitly: float('3') would silently succeed.
    if isinstance(value, six.string_types):
        raise TypeError('"value" should be a number, got {}'
                        .format(type(value)))
    value = float(value)
    self._check_step(step)
    tf_name = self._ensure_tf_name(name)
    summary = self._scalar_summary(tf_name, value, step)
    self._log_summary(tf_name, summary, value, step=step)
Log new value for given name on given step. Args: name (str): name of the variable (it will be converted to a valid tensorflow summary name). value (float): this is a real number to be logged as a scalar. step (int): non-negative integer used for visualization: you can log several different variables on one step, but should not log different values of the same variable on the same step (this is not checked).
def services(namespace='default', **kwargs):
    """Return a list of kubernetes services defined in the namespace.

    CLI Examples::

        salt '*' kubernetes.services
        salt '*' kubernetes.services namespace=default
    """
    cfg = _setup_conn(**kwargs)
    try:
        api_instance = kubernetes.client.CoreV1Api()
        api_response = api_instance.list_namespaced_service(namespace)
        return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')]
    except (ApiException, HTTPError) as exc:
        # 404 means the namespace does not exist; report None, not an error.
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        else:
            log.exception(
                'Exception when calling '
                'CoreV1Api->list_namespaced_service'
            )
            raise CommandExecutionError(exc)
    finally:
        _cleanup(**cfg)
Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default
def call(self, name, request=None, **params):
    """Call a resource by its ``Api`` name.

    :param name: The resource's name (short form).
    :param request: django.http.Request instance (a blank one is created
        when omitted).
    :param params: Params for the resource's call.
    :return object: Result of the resource's execution.
    :raises HttpError: 501 when the name is unknown.
    """
    if name not in self.resources:
        raise exceptions.HttpError(
            'Unknown method \'%s\'' % name,
            status=status.HTTP_501_NOT_IMPLEMENTED)
    request = request or HttpRequest()
    resource = self.resources[name]
    view = resource.as_view(api=self)
    return view(request, **params)
Call resource by ``Api`` name. :param name: The resource's name (short form) :param request: django.http.Request instance :param **params: Params for a resource's call :return object: Result of resource's execution
def dtype_repr(dtype):
    """Stringify ``dtype`` for ``repr``, with shorthand for the builtin
    int, float and complex types."""
    dtype = np.dtype(dtype)
    for builtin, label in ((int, 'int'), (float, 'float'), (complex, 'complex')):
        if dtype == np.dtype(builtin):
            return "'{}'".format(label)
    if dtype.shape:
        # Subarray dtype: show base type and shape.
        return "('{}', {})".format(dtype.base, dtype.shape)
    return "'{}'".format(dtype)
Stringify ``dtype`` for ``repr`` with default for int and float.
def path_dwim(basedir, given):
    """Make relative paths work like folks expect: absolute paths pass
    through, ``~/`` paths are expanded, everything else is joined to
    *basedir*."""
    if given.startswith("/"):
        return given
    if given.startswith("~/"):
        return os.path.expanduser(given)
    return os.path.join(basedir, given)
make relative paths work like folks expect.
def get_valid_location(location):
    """Check that *location* represents a valid cellular component.

    Returns the canonical location (resolving synonyms through the reverse
    mapping) and raises InvalidLocationError for unknown locations.
    """
    if location is not None and cellular_components.get(location) is None:
        # Not a canonical name; try the reverse (synonym) mapping.
        loc = cellular_components_reverse.get(location)
        if loc is None:
            raise InvalidLocationError(location)
        else:
            return loc
    return location
Check if the given location represents a valid cellular component.
def check_for_local_repos(repo):
    """Check whether *repo* resolves to a local (file://) repository.

    :param repo: repository name to look up in the default repositories.
    :returns: True when the repository URL is local, False otherwise
        (returning an explicit False instead of falling through to None).
    """
    repos_dict = Repo().default_repository()
    repo_url = repos_dict.get(repo)
    if repo_url is None:
        return False
    return repo_url.startswith("file:///")
Check if repository is local
def speak(self, text):
    """Queue *text* for conversion to speech.

    :param text: ISO-8859-1 compatible string to speak.
    :raises ValueError: if *text* is not ISO-8859-1 compatible
        (ValueError is an Exception subclass, so existing handlers work).
    """
    if not self.is_valid_string(text):
        raise ValueError("%s is not ISO-8859-1 compatible." % (text,))
    if len(text) > 1023:
        # The synthesizer accepts at most 1023 characters per message.
        for line in self.word_wrap(text, width=1023):
            self.queue.put("S%s" % (line,))
    else:
        self.queue.put("S%s" % (text,))
The main function to convert text into speech.
def generate_accounts(seeds):
    """Create private keys and addresses for all seeds."""
    accounts = {}
    for seed in seeds:
        # sha3 is a pure hash, so computing it once per seed is equivalent.
        secret = sha3(seed)
        accounts[seed] = {
            'privatekey': encode_hex(secret),
            'address': encode_hex(privatekey_to_address(secret)),
        }
    return accounts
Create private keys and addresses for all seeds.
def check_yamls(cls, dap):
    """Check that all assistants and snippets in *dap* are valid.

    Return list of DapProblems (errors for invalid YAML, warnings for
    empty files).
    """
    problems = list()
    for yaml in dap.assistants_and_snippets:
        path = yaml + '.yaml'
        parsed_yaml = YamlLoader.load_yaml_by_path(dap._get_file(path, prepend=True))
        if parsed_yaml:
            try:
                yaml_checker.check(path, parsed_yaml)
            except YamlError as e:
                problems.append(DapProblem(exc_as_decoded_string(e), level=logging.ERROR))
        else:
            # Empty YAML is suspicious but not fatal.
            problems.append(DapProblem('Empty YAML ' + path, level=logging.WARNING))
    return problems
Check that all assistants and snippets are valid. Return list of DapProblems.
def add_json(self, json_obj, **kwargs):
    """Add a json-serializable Python dict as a json file to IPFS.

    .. code-block:: python

        >>> c.add_json({'one': 1, 'two': 2, 'three': 3})
        'QmVz9g7m5u3oHiNKHj2CJX1dbG1gtismRS3g9NaPBBLbob'

    Parameters
    ----------
    json_obj : dict
        A json-serializable Python dictionary.

    Returns
    -------
    str : Hash of the added IPFS object
    """
    return self.add_bytes(encoding.Json().encode(json_obj), **kwargs)
Adds a json-serializable Python dict as a json file to IPFS. .. code-block:: python >>> c.add_json({'one': 1, 'two': 2, 'three': 3}) 'QmVz9g7m5u3oHiNKHj2CJX1dbG1gtismRS3g9NaPBBLbob' Parameters ---------- json_obj : dict A json-serializable Python dictionary Returns ------- str : Hash of the added IPFS object
def flux_up(self, fluxUpBottom, emission=None):
    """Compute upwelling radiative flux at interfaces between layers.

    Inputs:
        * fluxUpBottom: upwelling flux at the bottom boundary
        * emission: emission from atmospheric levels (N);
          defaults to zero if not given
    Returns:
        * vector of upwelling radiative flux between levels (N+1)
    """
    if emission is None:
        emission = np.zeros_like(self.absorptivity)
    # Append the bottom boundary flux to the per-level emissions.
    E = np.concatenate((emission, np.atleast_1d(fluxUpBottom)), axis=-1)
    # Propagate upward through the transmissivity matrix Tup.
    return np.squeeze(matrix_multiply(self.Tup, E[..., np.newaxis]))
Compute upwelling radiative flux at interfaces between layers. Inputs: * fluxUpBottom: upwelling flux at the bottom boundary * emission: emission from atmospheric levels (N) defaults to zero if not given Returns: * vector of upwelling radiative flux between levels (N+1).
def get_name(self, **values) -> str:
    """Get a new name string from this object's name values.

    :param values: keyword overrides; each key should refer to a field on
        this object whose value is used to build the new name.
    """
    if not values and self.name:
        # Nothing to override: reuse the cached name.
        return self.name
    if values:
        for ck, cvs in _sorted_items(self.compounds):
            # A compound directly provided as its own key needs no rebuild.
            if ck in cvs and ck in values:
                continue
            # Pull each component from the overrides or fall back to self.
            comp_values = [values.pop(cv, getattr(self, cv)) for cv in cvs]
            if None not in comp_values:
                values[ck] = ''.join(rf'{v}' for v in comp_values)
    return self._get_nice_name(**values)
Get a new name string from this object's name values. :param values: Variable keyword arguments where the **key** should refer to a field on this object that will use the provided **value** to build the new name.
def read_data_from_bin_file(fileName):
    """Load the binary data stored in a .bin file (saleae data logger
    export) and extract each channel's data, the per-channel length, the
    number of channels and the sample time.

    Returns
    -------
    (ChannelData, LenOf1Channel, NumOfChannels, SampleTime)
    """
    with open(fileName, mode='rb') as file:
        raw_bytes = file.read()
    # Delegate all parsing to the bytes-level reader.
    return read_data_from_bytes(raw_bytes)
Loads the binary data stored in the a binary file and extracts the data for each channel that was saved, along with the sample rate and length of the data array. Parameters ---------- fileContent : bytes bytes object containing the data from a .bin file exported from the saleae data logger. Returns ------- ChannelData : list List containing a list which contains the data from each channel LenOf1Channel : int The length of the data in each channel NumOfChannels : int The number of channels saved SampleTime : float The time between samples (in seconds) SampleRate : float The sample rate (in Hz)
def tickerId(self, contract_identifier):
    """Return the tickerId for the symbol, registering a new one if it
    does not exist yet."""
    symbol = contract_identifier
    if isinstance(symbol, Contract):
        symbol = self.contractString(symbol)
    # Reuse an existing id when the symbol is already registered.
    for existing_id, existing_symbol in self.tickerIds.items():
        if symbol == existing_symbol:
            return existing_id
    new_id = len(self.tickerIds)
    self.tickerIds[new_id] = symbol
    return new_id
returns the tickerId for the symbol, or registers a new one if it doesn't exist
def disqus_sso_script(context):
    """Provide a generic context variable adding single-sign-on support to
    DISQUS when ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and
    ``COMMENTS_DISQUS_API_SECRET_KEY`` are specified."""
    settings = context["settings"]
    public_key = getattr(settings, "COMMENTS_DISQUS_API_PUBLIC_KEY", "")
    secret_key = getattr(settings, "COMMENTS_DISQUS_API_SECRET_KEY", "")
    user = context["request"].user
    # SSO only makes sense with both keys and a logged-in user.
    if public_key and secret_key and user.is_authenticated():
        context["public_key"] = public_key
        context["sso_data"] = _get_disqus_sso(user, public_key, secret_key)
    return context
Provides a generic context variable which adds single-sign-on support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and ``COMMENTS_DISQUS_API_SECRET_KEY`` are specified.
def is_bytes(string):
    """Check if a string is a bytes-like instance.

    :param Union[str, bytes] string: value that may be str or bytes like
    :return: whether the provided value is a bytes type or not
    :rtype: bool
    """
    if six.PY3:
        return isinstance(string, (bytes, memoryview, bytearray))
    if six.PY2:
        return isinstance(string, (buffer, bytearray))
    return False
Check if a string is a bytes instance :param Union[str, bytes] string: A string that may be string or bytes like :return: Whether the provided string is a bytes type or not :rtype: bool
def __process_acl(self, load, auth_list):
    """Allow the eauth module to modify the access list right before it is
    applied to the request (for example, the ldap auth module expands
    entries)."""
    if 'eauth' not in load:
        return auth_list
    fstr = '{0}.process_acl'.format(load['eauth'])
    if fstr not in self.auth:
        # The eauth module does not customize ACLs; use the list as-is.
        return auth_list
    try:
        return self.auth[fstr](auth_list, self.opts)
    except Exception as e:
        # Best effort: fall back to the unmodified list on module failure.
        log.debug('Authentication module threw %s', e)
        return auth_list
Allows eauth module to modify the access list right before it'll be applied to the request. For example ldap auth module expands entries
def get_db(cls):
    """Return the database for the collection: the configured ``_db`` when
    set, otherwise the client's default database."""
    if not cls._db:
        return cls._client.get_default_database()
    return getattr(cls._client, cls._db)
Return the database for the collection
def divide(x1, x2, output_shape=None, name=None):
    """Binary division with broadcasting.

    Args:
        x1: a Tensor
        x2: a Tensor, or a python scalar
        output_shape: an optional Shape
        name: an optional string

    Returns:
        a Tensor
    """
    output_shape = convert_to_shape(output_shape)
    if not isinstance(x2, Tensor):
        # Scalar divisor: a single scalar-multiply by the reciprocal.
        return ScalarMultiplyOperation(x1, 1.0 / x2).outputs[0]
    with tf.name_scope(name, default_name="divide"):
        x1, x2 = binary_arguments_to_tensors(x1, x2)
        return multiply(x1, reciprocal(x2), output_shape=output_shape)
Binary division with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
def is_valid_data(obj):
    """Check if data is JSON serializable (falsy values pass trivially)."""
    if not obj:
        return True
    try:
        json.dumps(obj, default=datetime_encoder)
    except (TypeError, UnicodeDecodeError):
        return False
    return True
Check if data is JSON serializable.
def human_readable(self, dense_repr: Sequence[Sequence[int]]) -> List[List[str]]:
    """Return a human readable version of a dense representation, dropping
    the padding (index 0) entries, to facilitate manual inspection."""
    readable = []
    for sequence in dense_repr:
        # Index 0 is padding/blank; keep only real phoneme indices.
        labels = [idx for idx in sequence if idx != 0]
        readable.append(self.corpus.indices_to_labels(labels))
    return readable
Returns a human readable version of a dense representation of either or reference to facilitate simple manual inspection.
def get_config(config, default_config):
    """Load configuration from *config*, falling back to *default_config*
    when no path is given.

    :raises ConfigError: if the file is not valid YAML.
    """
    if not config:
        logging.warning('Using default config: %s', default_config)
        config = default_config
    try:
        with open(config, 'r') as config_file:
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary tags -- prefer yaml.safe_load for untrusted files.
            return yaml.load(config_file)
    except (yaml.reader.ReaderError, yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
        raise ConfigError('Invalid yaml file: \n %s' % str(e))
Load configuration from file if in config, else use default
def clean_email(self):
    """Validate that the email address is unique (case-insensitive),
    checking both confirmed and unconfirmed addresses."""
    if get_user_model().objects.filter(
            Q(email__iexact=self.cleaned_data['email']) |
            Q(email_unconfirmed__iexact=self.cleaned_data['email'])):
        raise forms.ValidationError(_(u'This email address is already '
                                      'in use. Please supply a different email.'))
    return self.cleaned_data['email']
Validate that the email address is unique.
def totalNumberOfTiles(self, minZoom=None, maxZoom=None):
    """Return the total number of tiles for this instance extent.

    :param minZoom: first zoom level to count (default 0).
    :param maxZoom: last zoom level to count, inclusive; defaults to the
        highest available resolution. A value of 0 now means "zoom 0 only"
        instead of being treated as unset (the old truthiness check
        silently ignored ``maxZoom=0``).
    """
    nbTiles = 0
    if minZoom is None:
        minZoom = 0
    if maxZoom is None:
        maxZoom = len(self.RESOLUTIONS)
    else:
        maxZoom = maxZoom + 1  # make the upper bound inclusive
    for zoom in range(minZoom, maxZoom):
        nbTiles += self.numberOfTilesAtZoom(zoom)
    return nbTiles
Return the total number of tiles for this instance extent
def _check(peers):
    """Check whether the input is a valid list of peers, resolving domain
    names into IP addresses when the resolver libraries are available.

    Returns False for non-lists, non-string entries, or names with no DNS
    answer; True otherwise.
    """
    if not isinstance(peers, list):
        return False
    for peer in peers:
        if not isinstance(peer, six.string_types):
            return False
    if not HAS_NETADDR:
        # Without netaddr we cannot validate further; accept the list.
        return True
    ip_only_peers = []
    for peer in peers:
        try:
            ip_only_peers.append(six.text_type(IPAddress(peer)))
        except AddrFormatError:
            # Not a literal IP: try resolving it as a hostname.
            if not HAS_DNSRESOLVER:
                continue
            dns_reply = []
            try:
                dns_reply = dns.resolver.query(peer)
            except dns.resolver.NoAnswer:
                return False
            for dns_ip in dns_reply:
                ip_only_peers.append(six.text_type(dns_ip))
    # NOTE(review): this rebinds only the local name; the caller's list is
    # not transformed -- confirm whether that was intended.
    peers = ip_only_peers
    return True
Checks whether the input is a valid list of peers and transforms domain names into IP Addresses
def x_runtime(f, *args, **kwargs):
    """X-Runtime Flask Response Decorator: stamp the wrapped view's wall
    time onto the response's X-Runtime header."""
    started = now()
    response = f(*args, **kwargs)
    elapsed = now() - started
    response.headers['X-Runtime'] = '{0}s'.format(Decimal(str(elapsed)))
    return response
X-Runtime Flask Response Decorator.
def found(self):
    """Run a minimal query to determine the total number of matching hits."""
    if 'ids' in self.kwargs:
        collection_id = self.kwargs['query']['collection']['eq']
        return len(self.items_by_id(self.kwargs['ids'], collection_id))
    # Ask for zero items so only the metadata (hit count) is computed;
    # explicit values in self.kwargs still take precedence.
    params = {'page': 1, 'limit': 0}
    params.update(self.kwargs)
    return self.query(**params)['meta']['found']
Small query to determine total number of hits
def locate_private_alleles(*acs):
    """Locate alleles that occur in exactly one population.

    Parameters
    ----------
    *acs : array_like, int, shape (n_variants, n_alleles)
        Allele counts arrays, one per population.

    Returns
    -------
    loc : ndarray, bool, shape (n_variants, n_alleles)
        True where an allele is private to a single population.
    """
    # Normalise inputs and make sure all arrays are aligned on both axes.
    acs = [asarray_ndim(ac, 2) for ac in acs]
    check_dim0_aligned(*acs)
    acs = ensure_dim1_aligned(*acs)
    # Stack populations along a third axis, then count per allele how many
    # populations carry it; "private" means exactly one population.
    stacked = np.dstack(acs)
    n_pops_with_allele = np.sum(stacked > 0, axis=2)
    return n_pops_with_allele == 1
Locate alleles that are found only in a single population. Parameters ---------- *acs : array_like, int, shape (n_variants, n_alleles) Allele counts arrays from each population. Returns ------- loc : ndarray, bool, shape (n_variants, n_alleles) Boolean array where elements are True if allele is private to a single population. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]], ... [[0, 1], [0, 1], [0, 1], [0, 1]], ... [[0, 1], [0, 1], [1, 1], [1, 1]], ... [[0, 0], [0, 0], [1, 1], [2, 2]], ... [[0, 0], [-1, -1], [1, 1], [-1, -1]]]) >>> ac1 = g.count_alleles(subpop=[0, 1]) >>> ac2 = g.count_alleles(subpop=[2]) >>> ac3 = g.count_alleles(subpop=[3]) >>> loc_private_alleles = allel.locate_private_alleles(ac1, ac2, ac3) >>> loc_private_alleles array([[ True, False, False], [False, False, False], [ True, False, False], [ True, True, True], [ True, True, False]]) >>> loc_private_variants = np.any(loc_private_alleles, axis=1) >>> loc_private_variants array([ True, False, True, True, True])
def gradfunc(self, p):
    """The gradient-computing function passed to the optimizers, if needed.

    Sets the model stochastics from the parameter vector ``p``, fills
    ``self.grad`` with per-parameter derivatives, and returns the negated
    gradient (the optimizers minimise while the model is maximised).
    """
    self._set_stochastics(p)
    # NOTE(review): ``xrange`` makes this block Python-2 only.
    for i in xrange(self.len):
        self.grad[i] = self.diff(i)
    return -1 * self.grad
The gradient-computing function that gets passed to the optimizers, if needed.
def function_call_with_timeout(fun_name, fun_args, secs=5):
    """Run a Python function in a child process with a timeout.

    No interprocess communication or return values are handled.

    :param fun_name: callable to run (must be picklable for multiprocessing)
    :param fun_args: iterable of positional arguments for the callable
    :param secs: timeout in seconds; 0 means wait forever
    :return: True if the function finished in time, False if it was terminated
    """
    from multiprocessing import Process
    p = Process(target=fun_name, args=tuple(fun_args))
    p.start()
    # join(None) blocks until completion, matching the "secs == 0 means
    # infinite timeout" contract; otherwise wait at most `secs` seconds.
    # This replaces the original 0.1s busy-wait polling loop.
    p.join(None if secs == 0 else secs)
    if p.is_alive():
        print("Process time has exceeded timeout, terminating it.")
        p.terminate()
        # Bug fix: reap the terminated child (the original returned without
        # joining after terminate(), leaving a zombie process).
        p.join()
        return False
    return True
Run a Python function with a timeout. No interprocess communication or return values are handled. Setting secs to 0 gives infinite timeout.
def __flush(self, async=True):
    """Flush queued messages through the current HttpRequest and close it.

    Assumes a current request handler (``self.rh``) and that the caller
    holds ``self.lock``.

    NOTE(review): ``async`` became a reserved keyword in Python 3.7, so this
    signature only compiles on Python 2 / early Python 3; renaming it would
    break existing callers that pass ``async=...`` by keyword.
    """
    rh = self.rh
    # Snapshot and clear the pending queues before handing them off, so new
    # messages can keep accumulating while the flush is in flight.
    messages = list(self.messages)
    stream_notices = list(self.stream_notices)
    self.stream_notices = []
    self.messages = []
    args = (rh, messages, stream_notices)
    if async:
        # Hand the actual write off to the hub's thread pool.
        self.hub.threadPool.execute_named(self.__inner_flush,
            '%s __inner__flush' % self.hub.l.name, *args)
    else:
        self.__inner_flush(*args)
    self.rh = None
    self._set_timeout(int(time.time() + self.hub.timeout))
Flushes messages through current HttpRequest and closes it. It assumes a current requesthandler and requires a lock on self.lock
def recv_exit_status(self, command, timeout=10, get_pty=False):
    """Execute *command* over SSH and wait for its exit status.

    @param command: command to execute
    @type command: str
    @param timeout: seconds to wait for the exit status
    @type timeout: int
    @param get_pty: request a pseudo-terminal
    @type get_pty: bool
    @return: the exit code of the process, or None on timeout
    @rtype: int or None
    """
    status = None
    self.last_command = command
    stdin, stdout, stderr = self.cli.exec_command(command, get_pty=get_pty)
    if stdin and stdout and stderr:
        # Poll once per second until the channel reports an exit status.
        for _ in range(timeout):
            if stdout.channel.exit_status_ready():
                status = stdout.channel.recv_exit_status()
                break
            time.sleep(1)
    self.last_stdout = stdout.read()
    self.last_stderr = stderr.read()
    stdin.close()
    stdout.close()
    stderr.close()
    return status
Execute a command and get its return value @param command: command to execute @type command: str @param timeout: command execution timeout @type timeout: int @param get_pty: get pty @type get_pty: bool @return: the exit code of the process or None in case of timeout @rtype: int or None
def has_object_permission(self, request, view, obj):
    """Figure out and invoke the right per-object permission hook.

    Resolution order:
      1. If object-level permissions are disabled, allow.
      2. ``has_object_<action>_permission`` on the object, when defined.
      3. Otherwise fall back to ``has_object_read_permission`` for safe HTTP
         methods or ``has_object_write_permission`` for mutating ones; the
         model object is asserted to define the fallback.
    """
    if not self.object_permissions:
        return True
    serializer_class = view.get_serializer_class()
    model_class = serializer_class.Meta.model
    action_method_name = None
    if hasattr(view, 'action'):
        action = self._get_action(view.action)
        action_method_name = "has_object_{action}_permission".format(action=action)
        # Prefer the action-specific hook when the object provides one.
        if hasattr(obj, action_method_name):
            return getattr(obj, action_method_name)(request)
    if request.method in permissions.SAFE_METHODS:
        assert hasattr(obj, 'has_object_read_permission'), \
            self._get_error_message(model_class, 'has_object_read_permission', action_method_name)
        return obj.has_object_read_permission(request)
    else:
        assert hasattr(obj, 'has_object_write_permission'), \
            self._get_error_message(model_class, 'has_object_write_permission', action_method_name)
        return obj.has_object_write_permission(request)
Overrides the standard function and figures out methods to call for object permissions.
def parse_if(self):
    """Parse an ``if`` construct, including any ``elif``/``else`` branches.

    ``result`` is the outermost If node returned to the caller, while
    ``node`` tracks the branch currently being filled: each ``elif`` creates
    a nested If appended to ``result.elif_``.
    """
    node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
    while 1:
        node.test = self.parse_tuple(with_condexpr=False)
        node.body = self.parse_statements(('name:elif', 'name:else',
                                           'name:endif'))
        node.elif_ = []
        node.else_ = []
        token = next(self.stream)
        if token.test('name:elif'):
            # Start a new If node for this elif branch and keep looping.
            node = nodes.If(lineno=self.stream.current.lineno)
            result.elif_.append(node)
            continue
        elif token.test('name:else'):
            result.else_ = self.parse_statements(('name:endif',),
                                                 drop_needle=True)
        # A plain endif (or the else body above) ends the construct.
        break
    return result
Parse an if construct.
def get_user_if_exists(strategy, details, user=None, *args, **kwargs):
    """Return the User with the given username iff that User exists.

    Social-auth pipeline step: when a user is already attached, just flag it
    as not new; otherwise look the username up and return it, or an empty
    dict when no such user exists.
    """
    if user:
        return {'is_new': False}
    username = details.get('username')
    try:
        existing = User.objects.get(username=username)
    except User.DoesNotExist:
        return {}
    return {'is_new': False, 'user': existing}
Return a User with the given username iff the User exists.
def get_next_invalid_time_from_t(self, timestamp):
    """Get next invalid time for this time range.

    :param timestamp: time we compute from
    :type timestamp: int
    :return: timestamp of the next invalid time (LOCAL TIME), or None
        if no invalid time can be found
    :rtype: int
    """
    # An already-invalid time is its own answer.
    if not self.is_time_valid(timestamp):
        return timestamp
    # First attempt: look for an invalid slot starting from the next
    # invalid day (or from `timestamp` itself if it is already past it).
    t_day = self.get_next_invalid_day(timestamp)
    if timestamp < t_day:
        sec_from_morning = self.get_next_future_timerange_invalid(t_day)
    else:
        sec_from_morning = self.get_next_future_timerange_invalid(timestamp)
    if t_day is not None and sec_from_morning is not None:
        return t_day + sec_from_morning + 1
    if t_day is not None and sec_from_morning is None:
        return t_day
    # Nothing found today: retry from the start of the following day.
    timestamp = get_day(timestamp) + 86400
    t_day2 = self.get_next_invalid_day(timestamp)
    sec_from_morning = self.get_next_future_timerange_invalid(t_day2)
    if t_day2 is not None and sec_from_morning is not None:
        return t_day2 + sec_from_morning + 1
    if t_day2 is not None and sec_from_morning is None:
        return t_day2
    # No next invalid time exists.
    return None
Get next invalid time for time range :param timestamp: time we compute from :type timestamp: int :return: timestamp of the next invalid time (LOCAL TIME) :rtype: int
def stelab(pobj, vobs):
    """Correct the apparent position of an object for stellar aberration.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/stelab_c.html

    :param pobj: Position of an object with respect to the observer.
    :type pobj: 3-Element Array of floats
    :param vobs: Velocity of the observer with respect to the Solar System
        barycenter.
    :type vobs: 3-Element Array of floats
    :return: Apparent position of the object with respect to the observer,
        corrected for stellar aberration.
    :rtype: 3-Element Array of floats
    """
    position = stypes.toDoubleVector(pobj)
    velocity = stypes.toDoubleVector(vobs)
    # Output buffer filled in-place by the CSPICE call.
    apparent = stypes.emptyDoubleVector(3)
    libspice.stelab_c(position, velocity, apparent)
    return stypes.cVectorToPython(apparent)
Correct the apparent position of an object for stellar aberration. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/stelab_c.html :param pobj: Position of an object with respect to the observer. :type pobj: 3-Element Array of floats :param vobs: Velocity of the observer with respect to the Solar System barycenter. :type vobs: 3-Element Array of floats :return: Apparent position of the object with respect to the observer, corrected for stellar aberration. :rtype: 3-Element Array of floats
def configure(self, **configs):
    """Configure the consumer instance from keyword arguments.

    Unknown keys raise KafkaConfigurationError. Enabling auto-commit
    requires a ``group_id``; ``bootstrap_servers`` is mandatory.

    :raises KafkaConfigurationError: on unknown keys, auto-commit without a
        group, or missing bootstrap servers.
    """
    configs = self._deprecate_configs(**configs)
    self._config = {}
    for key in self.DEFAULT_CONFIG:
        self._config[key] = configs.pop(key, self.DEFAULT_CONFIG[key])
    if configs:
        raise KafkaConfigurationError('Unknown configuration key(s): ' +
                                      str(list(configs.keys())))
    # Consolidated: the original tested auto_commit_enable twice in a row;
    # validation and setup now live in a single block.
    if self._config['auto_commit_enable']:
        if not self._config['group_id']:
            raise KafkaConfigurationError(
                'KafkaConsumer configured to auto-commit '
                'without required consumer group (group_id)'
            )
        logger.info("Configuring consumer to auto-commit offsets")
        self._reset_auto_commit()
    if not self._config['bootstrap_servers']:
        raise KafkaConfigurationError(
            'bootstrap_servers required to configure KafkaConsumer'
        )
    self._client = KafkaClient(
        self._config['bootstrap_servers'],
        client_id=self._config['client_id'],
        timeout=(self._config['socket_timeout_ms'] / 1000.0)
    )
Configure the consumer instance Configuration settings can be passed to constructor, otherwise defaults will be used: Keyword Arguments: bootstrap_servers (list): List of initial broker nodes the consumer should contact to bootstrap initial cluster metadata. This does not have to be the full node list. It just needs to have at least one broker that will respond to a Metadata API Request. client_id (str): a unique name for this client. Defaults to 'kafka.consumer.kafka'. group_id (str): the name of the consumer group to join, Offsets are fetched / committed to this group name. fetch_message_max_bytes (int, optional): Maximum bytes for each topic/partition fetch request. Defaults to 1024*1024. fetch_min_bytes (int, optional): Minimum amount of data the server should return for a fetch request, otherwise wait up to fetch_wait_max_ms for more data to accumulate. Defaults to 1. fetch_wait_max_ms (int, optional): Maximum time for the server to block waiting for fetch_min_bytes messages to accumulate. Defaults to 100. refresh_leader_backoff_ms (int, optional): Milliseconds to backoff when refreshing metadata on errors (subject to random jitter). Defaults to 200. socket_timeout_ms (int, optional): TCP socket timeout in milliseconds. Defaults to 30*1000. auto_offset_reset (str, optional): A policy for resetting offsets on OffsetOutOfRange errors. 'smallest' will move to the oldest available message, 'largest' will move to the most recent. Any ofther value will raise the exception. Defaults to 'largest'. deserializer_class (callable, optional): Any callable that takes a raw message value and returns a deserialized value. Defaults to lambda msg: msg. auto_commit_enable (bool, optional): Enabling auto-commit will cause the KafkaConsumer to periodically commit offsets without an explicit call to commit(). Defaults to False. auto_commit_interval_ms (int, optional): If auto_commit_enabled, the milliseconds between automatic offset commits. Defaults to 60 * 1000. 
auto_commit_interval_messages (int, optional): If auto_commit_enabled, a number of messages consumed between automatic offset commits. Defaults to None (disabled). consumer_timeout_ms (int, optional): number of milliseconds to throw a timeout exception to the consumer if no message is available for consumption. Defaults to -1 (don't throw exception). Configuration parameters are described in more detail at http://kafka.apache.org/documentation.html#highlevelconsumerapi
def create_shared(self, name, ref):
    """For the app backends to create the GLShared object.

    May be called at most once; a second call raises RuntimeError.

    Parameters
    ----------
    name : str
        The name.
    ref : object
        The reference.
    """
    if self._shared is not None:
        raise RuntimeError('Can only set_shared once.')
    self._shared = GLShared(name, ref)
For the app backends to create the GLShared object. Parameters ---------- name : str The name. ref : object The reference.
def compute_fw_at_frac_max_1d_simple(Y, xc, X=None, f=0.5):
    """Compute the full width of a 1-D profile at fraction *f* of its maximum.

    :param Y: 1-D array of profile values
    :param xc: abscissa of the peak
    :param X: optional abscissa array (defaults to pixel indices)
    :param f: fraction of the peak value at which to measure the width
    :return: tuple of (peak value, full width at f * peak)
    :raises ValueError: if Y is not 1-D, is empty, or the peak lies
        outside the array
    """
    yy = np.asarray(Y)
    if yy.ndim != 1:
        raise ValueError('array must be 1-d')
    if yy.size == 0:
        raise ValueError('array is empty')
    if X is None:
        xx = np.arange(yy.shape[0])
    else:
        xx = X
    xpix = coor_to_pix_1d(xc - xx[0])
    # Bug fix: the original relied on IndexError, but a negative index
    # silently wraps around in numpy, so peaks left of the array went
    # undetected. Check the bounds explicitly instead.
    if not 0 <= xpix < yy.shape[0]:
        raise ValueError('peak is out of array')
    peak = yy[xpix]
    fwhm_x, _codex, _msgx = compute_fwhm_1d(xx, yy - f * peak, xc, xpix)
    return peak, fwhm_x
Compute the full width at fraction f of the maximum
def upload(self, filename, filedata=None, filepath=None, **kwargs):
    """Upload the specified file into the project.

    Exactly one of ``filedata`` (raw bytes) or ``filepath`` (path to a
    local file) must be supplied.

    Args:
        filename (str): name of the file being uploaded
        filedata (bytes): raw data of the file being uploaded
        filepath (str): path to a local file to upload

    Raises:
        GitlabUploadError: if neither or both of filedata/filepath are given,
            or the upload fails

    Returns:
        dict: keys ``alt``, ``url`` and ``markdown`` describing the upload
    """
    if filepath is None and filedata is None:
        raise GitlabUploadError("No file contents or path specified")
    if filedata is not None and filepath is not None:
        raise GitlabUploadError("File contents and file path specified")
    if filepath is not None:
        with open(filepath, "rb") as f:
            filedata = f.read()
    url = '/projects/%(id)s/uploads' % {'id': self.id}
    data = self.manager.gitlab.http_post(
        url, files={'file': (filename, filedata)})
    return {
        "alt": data['alt'],
        "url": data['url'],
        "markdown": data['markdown'],
    }
Upload the specified file into the project. .. note:: Either ``filedata`` or ``filepath`` *MUST* be specified. Args: filename (str): The name of the file being uploaded filedata (bytes): The raw data of the file being uploaded filepath (str): The path to a local file to upload (optional) Raises: GitlabConnectionError: If the server cannot be reached GitlabUploadError: If the file upload fails GitlabUploadError: If ``filedata`` and ``filepath`` are not specified GitlabUploadError: If both ``filedata`` and ``filepath`` are specified Returns: dict: A ``dict`` with the keys: * ``alt`` - The alternate text for the upload * ``url`` - The direct url to the uploaded file * ``markdown`` - Markdown for the uploaded file
def updateColormap(self):
    """Update the current colormap according to the stored image settings."""
    lut = self.imgArgs['lut']
    if lut is not None:
        self.img.setLookupTable(lut)
    self.img.setLevels(self.imgArgs['levels'])
Updates the current colormap according to stored settings
def _resume_ssl_session(
        server_info: ServerConnectivityInfo,
        ssl_version_to_use: OpenSslVersionEnum,
        ssl_session: Optional[nassl._nassl.SSL_SESSION] = None,
        should_enable_tls_ticket: bool = False
) -> nassl._nassl.SSL_SESSION:
    """Connect to the server and return the SSL session assigned to that
    connection.

    If *ssl_session* is given, try to resume that session. Unless
    *should_enable_tls_ticket* is set, stateless (ticket-based) resumption
    is disabled so session-ID resumption is exercised instead.
    """
    connection = server_info.get_preconfigured_ssl_connection(
        override_ssl_version=ssl_version_to_use)
    if not should_enable_tls_ticket:
        connection.ssl_client.disable_stateless_session_resumption()
    if ssl_session:
        connection.ssl_client.set_session(ssl_session)
    try:
        connection.connect()
        new_session = connection.ssl_client.get_session()
    finally:
        # Always tear the connection down, even on handshake failure.
        connection.close()
    return new_session
Connect to the server and returns the session object that was assigned for that connection. If ssl_session is given, tries to resume that session.
def create(self, networkipv4s):
    """Create a batch of network-ipv4 entries.

    :param networkipv4s: list of network-ipv4 payloads to create
    :return: response from the POST request
    """
    payload = {'networks': networkipv4s}
    return super(ApiNetworkIPv4, self).post('api/v3/networkv4/', payload)
Method to create network-ipv4's :param networkipv4s: List containing networkipv4's desired to be created on database :return: response from the POST request
def remover(self, id_groupl3):
    """Remove a Group L3 by its identifier.

    :param id_groupl3: Identifier of the Group L3; integer greater than zero.
    :return: parsed API response
    :raise InvalidParameterError: if the identifier is null or invalid.
    """
    if not is_valid_int_param(id_groupl3):
        raise InvalidParameterError(
            u'The identifier of Group L3 is invalid or was not informed.')
    url = 'groupl3/%s/' % str(id_groupl3)
    code, xml = self.submit(None, 'DELETE', url)
    return self.response(code, xml)
Remove Group L3 from by the identifier. :param id_groupl3: Identifier of the Group L3. Integer value and greater than zero. :return: None :raise InvalidParameterError: The identifier of Group L3 is null and invalid. :raise GrupoL3NaoExisteError: Group L3 not registered. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
def to_dict(self, properties=True):
    """Encode the Eds graph as a dictionary suitable for JSON serialization.

    :param properties: when True, include each node's type (cvarsort) and
        morphosemantic properties in its entry.
    :return: dict with the top node id under ``top`` and a mapping of
        node id to node data under ``nodes``.
    """
    nodes = {}
    for node in self.nodes():
        nd = {
            'label': node.pred.short_form(),
            'edges': self.edges(node.nodeid)
        }
        # Surface alignment (character span), when available.
        if node.lnk is not None:
            nd['lnk'] = {'from': node.cfrom, 'to': node.cto}
        if properties:
            if node.cvarsort is not None:
                nd['type'] = node.cvarsort
            props = node.properties
            if props:
                nd['properties'] = props
        # Constant argument (e.g. a named-entity string), when present.
        if node.carg is not None:
            nd['carg'] = node.carg
        nodes[node.nodeid] = nd
    return {'top': self.top, 'nodes': nodes}
Encode the Eds as a dictionary suitable for JSON serialization.
def axis(self) -> Callable[[Any], Any]:
    """Return the axis-extraction function matching this hist's axis type."""
    return hist_axis_func(axis_type=self.axis_type)
Determine the axis to return based on the hist type.
def _forceInt(x,y,z,dens,b2,c2,i,glx=None,glw=None):
    """Integral that gives the force component i (0=x, 1=y, 2=z) at (x, y, z)
    for a triaxial density ``dens`` with squared axis ratios ``b2``, ``c2``.

    If Gauss-Legendre abscissae ``glx`` and weights ``glw`` are supplied,
    evaluate the quadrature sum directly; otherwise integrate adaptively.
    """
    def integrand(s):
        # Substitution t = 1/s^2 - 1 maps s in (0, 1] onto t in [0, inf).
        t= 1/s**2.-1.
        return dens(numpy.sqrt(x**2./(1.+t)+y**2./(b2+t)+z**2./(c2+t)))\
            *(x/(1.+t)*(i==0)+y/(b2+t)*(i==1)+z/(c2+t)*(i==2))\
            /numpy.sqrt((1.+(b2-1.)*s**2.)*(1.+(c2-1.)*s**2.))
    if glx is None:
        return integrate.quad(integrand,0.,1.)[0]
    else:
        return numpy.sum(glw*integrand(glx))
Integral that gives the force in x,y,z
def _calculate_solar_time(self, hour, eq_of_time, is_solar_time): if is_solar_time: return hour return ( (hour * 60 + eq_of_time + 4 * math.degrees(self._longitude) - 60 * self.time_zone) % 1440) / 60
Calculate Solar time for an hour.
def fromLatex(tex, *args, **kwargs):
    """Create a TreeOfContents abstraction from LaTeX source.

    :param str tex: LaTeX source
    :return: TreeOfContents object
    """
    parsed = TexSoup(tex)
    return TOC('[document]',
               source=parsed,
               descendants=list(parsed.descendants),
               *args, **kwargs)
Creates abstraction using Latex :param str tex: Latex :return: TreeOfContents object
def remove_overlap(self, begin, end=None):
    """Remove all intervals overlapping the given point or range.

    Completes in O((r+m)*log n) time, where n is the tree size, m the
    number of matches, and r the size of the search range (1 for a point).
    """
    if end is None:
        hits = self.at(begin)
    else:
        hits = self.overlap(begin, end)
    for interval in hits:
        self.remove(interval)
Removes all intervals overlapping the given point or range. Completes in O((r+m)*log n) time, where: * n = size of the tree * m = number of matches * r = size of the search range (this is 1 for a point)
def str_deps(self):
    """Return a string describing the dependencies of this node."""
    lines = ["Dependencies of node %s:" % str(self)]
    for idx, dep in enumerate(self.deps):
        lines.append("%d) %s, status=%s" % (idx, dep.info, str(dep.status)))
    return "\n".join(lines)
Return the string representation of the dependencies of the node.
async def execute(self, keys=None, args=None, client=None):
    """Execute the script, passing any required ``args``.

    If the server does not recognise the script's SHA, the script is
    re-loaded and the call retried once.

    :param keys: Redis keys passed to the script (default: none)
    :param args: extra arguments passed to the script (default: none)
    :param client: client/pipeline to run against (default: the one the
        script was registered with)
    """
    # Fixed mutable default arguments (keys=[], args=[]); None sentinels
    # are behaviour-equivalent since the lists were never mutated.
    keys = [] if keys is None else keys
    args = [] if args is None else args
    if client is None:
        client = self.registered_client
    script_args = tuple(keys) + tuple(args)
    if isinstance(client, BasePipeline):
        # Pipelines defer execution; make sure the script gets loaded first.
        client.scripts.add(self)
    try:
        return await client.evalsha(self.sha, len(keys), *script_args)
    except NoScriptError:
        self.sha = await client.script_load(self.script)
        return await client.evalsha(self.sha, len(keys), *script_args)
Execute the script, passing any required ``args``
def pubmed_url(args=sys.argv[1:], resolve_doi=True, out=sys.stdout):
    """Get a publication URL via the command line using a PubMed ID or
    PubMed URL.

    NOTE(review): the ``resolve_doi`` parameter is shadowed by the parsed
    ``--doi`` flag (``args.doi``) and is never used -- confirm intent.
    NOTE(review): the defaults ``sys.argv[1:]`` and ``sys.stdout`` are
    evaluated once at import time.
    """
    parser = argparse.ArgumentParser(
        description='Get a publication URL using a PubMed ID or PubMed URL')
    parser.add_argument('query', help='PubMed ID or PubMed URL')
    # store_false: passing -d/--doi DISABLES DOI resolution (args.doi=False).
    parser.add_argument(
        '-d', '--doi', action='store_false', help='get DOI URL')
    parser.add_argument(
        '-e', '--email', action='store', help='set user email', default='')
    args = parser.parse_args(args=args)
    lookup = PubMedLookup(args.query, args.email)
    publication = Publication(lookup, resolve_doi=args.doi)
    out.write(publication.url + '\n')
Get a publication URL via the command line using a PubMed ID or PubMed URL
def geo(self):
    """General image geo information.

    Returns
    -------
    dict
        A dictionary with keys `xmin`, `xmax`, `xres`, `rotation_x`,
        `ymin`, `ymax`, `yres`, `rotation_y`.
    """
    keys = ['xmin', 'xres', 'rotation_x', 'ymax', 'rotation_y', 'yres']
    out = dict(zip(keys, self.raster.GetGeoTransform()))
    # Derive the remaining corners from the extent and pixel resolution
    # (yres is typically negative, so ymin = ymax + yres * rows).
    out['xmax'] = out['xmin'] + out['xres'] * self.cols
    out['ymin'] = out['ymax'] + out['yres'] * self.rows
    return out
General image geo information. Returns ------- dict a dictionary with keys `xmin`, `xmax`, `xres`, `rotation_x`, `ymin`, `ymax`, `yres`, `rotation_y`
def cyvcf_add_filter(rec, name):
    """Add a FILTER value to a cyvcf2 record, avoiding duplicates.

    The existing semicolon-separated FILTER string is split, *name* is
    appended if absent, and the list is assigned back to the record.
    """
    existing = rec.FILTER.split(";") if rec.FILTER else []
    if name not in existing:
        existing.append(name)
    rec.FILTER = existing
    return rec
Add a FILTER value to a cyvcf2 record
def cache_hash(*a, **kw):
    """Try to hash an arbitrary set of args/kwargs into a stable hex digest
    suitable for use as a cache key.

    NOTE(review): Python-2 only (``basestring`` and
    ``types.UnboundMethodType`` do not exist on Python 3).
    """
    def cache_str(o):
        # Functions/methods are keyed by name only, so two same-named
        # callables produce the same cache key.
        if isinstance(o, (types.FunctionType, types.BuiltinFunctionType,
                          types.MethodType, types.BuiltinMethodType,
                          types.UnboundMethodType)):
            return getattr(o, 'func_name', 'func')
        if isinstance(o, dict):
            o = [k + ':' + cache_str(v) for k, v in o.items()]
        if isinstance(o, (list, tuple, set)):
            # Sort so that ordering differences do not change the key.
            o = sorted(map(cache_str, o))
            o = '|'.join(o)
        if isinstance(o, basestring):
            return o
        if hasattr(o, 'updated_at'):
            # Model-like objects: key on repr plus last-modified stamp so
            # the cache invalidates when the object changes.
            return cache_str((repr(o), o.updated_at))
        return repr(o)
    # NOTE(review): ``hash`` shadows the builtin within this function.
    hash = cache_str((a, kw)).encode('utf-8')
    return sha1(hash).hexdigest()
Try to hash an arbitrary object for caching.
def get_soql_fields(soql):
    """Extract the queried column names from a SOQL SELECT statement.

    :param soql: SOQL query string (must contain SELECT ... FROM)
    :return: list of field names appearing between SELECT and FROM
    """
    # Bug fix: the inline (?s) flag sat mid-pattern, which is an error on
    # Python 3.11+; pass re.DOTALL as a flag argument instead.
    soql_fields = re.search('(?<=select)(.*)(?=from)', soql,
                            re.IGNORECASE | re.DOTALL)
    raw = re.sub(' ', '', soql_fields.group())
    raw = re.sub('\t', '', raw)
    # Bug fix: the original split pattern ',|\n|\r|' ended with an empty
    # alternative; on Python 3.7+ that splits between every character.
    fields = re.split(',|\n|\r', raw)
    return [field for field in fields if field != '']
Gets queried columns names.
def _one_hidden(self, l:int)->Tensor:
    "Return one zero-filled hidden state of shape (1, bs, nh) for RNN layer ``l``."
    # The last layer maps back to the embedding size, earlier layers use
    # n_hid; halved per direction for bidirectional models.
    nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
    return one_param(self).new(1, self.bs, nh).zero_()
Return one hidden state.
def read(self):
    """Read some number of messages via the base Client, signalling
    distribute-readiness when appropriate."""
    count = Client.read(self)
    if self.needs_distribute_ready():
        self.distribute_ready()
    return count
Read some number of messages