code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def construct_stable_id(
    parent_context,
    polymorphic_type,
    relative_char_offset_start,
    relative_char_offset_end,
):
    """Construct a stable ID for a Context from its parent and relative offsets.

    The parent-relative character offsets are converted to document-absolute
    offsets using the document char start encoded in the parent's stable_id.
    """
    doc_id, _, parent_doc_char_start, _ = split_stable_id(parent_context.stable_id)
    start = parent_doc_char_start + relative_char_offset_start
    end = parent_doc_char_start + relative_char_offset_end
    return f"{doc_id}::{polymorphic_type}:{start}:{end}"
Construct a stable ID for a Context given its parent and its character offsets relative to the parent.
def get(self, sid):
    """Construct a ChannelContext bound to this service and the given sid.

    :param sid: The sid
    :returns: twilio.rest.chat.v1.service.channel.ChannelContext
    :rtype: twilio.rest.chat.v1.service.channel.ChannelContext
    """
    return ChannelContext(self._version, service_sid=self._solution['service_sid'], sid=sid, )
Constructs a ChannelContext :param sid: The sid :returns: twilio.rest.chat.v1.service.channel.ChannelContext :rtype: twilio.rest.chat.v1.service.channel.ChannelContext
def create_unsigned_transaction(cls,
                                *,
                                nonce: int,
                                gas_price: int,
                                gas: int,
                                to: Address,
                                value: int,
                                data: bytes) -> 'BaseUnsignedTransaction':
    """Proxy for instantiating an unsigned transaction for this VM.

    Delegates entirely to the VM's configured transaction class.
    """
    return cls.get_transaction_class().create_unsigned_transaction(
        nonce=nonce,
        gas_price=gas_price,
        gas=gas,
        to=to,
        value=value,
        data=data
    )
Proxy for instantiating an unsigned transaction for this VM.
def artUrl(self):
    """Return the most specific available art URL for this item, or None."""
    # prefer the item's own art, falling back to the grandparent's
    art = self.firstAttr('art', 'grandparentArt')
    return self._server.url(art, includeToken=True) if art else None
Return the first art URL, starting with the most specific one for that item.
def extendMarkdown(self, md, md_globals=None):
    """Register the attr_cols tree processor after checking prerequisites.

    :raises RuntimeError: if a required extension is not already registered.
    """
    if any(
            x not in md.treeprocessors
            for x in self.REQUIRED_EXTENSION_INTERNAL_NAMES):
        raise RuntimeError(
            "The attr_cols markdown extension depends the following"
            " extensions which must preceded it in the extension"
            " list: %s" % ", ".join(self.REQUIRED_EXTENSIONS))
    processor = AttrColTreeProcessor(md, self.conf)
    # priority 5: run after the required extensions' processors
    md.treeprocessors.register(
        processor, 'attr_cols', 5)
Initializes markdown extension components.
def side_task(pipe, *side_jobs):
    """Run side-effect functions over a pipeline without altering its data.

    Yields each original item from `pipe` unchanged while the side jobs
    observe the stream.
    """
    assert iterable(pipe), 'side_task needs the first argument to be iterable'
    for sj in side_jobs:
        assert callable(sj), 'all side_jobs need to be functions, not {}'.format(sj)
    # identity function first so i[0] below is the untouched original item
    side_jobs = (lambda i:i ,) + side_jobs
    # NOTE(review): map(pipe, *side_jobs) passes `pipe` (an iterable) where a
    # callable is expected and functions where iterables are expected — this
    # looks suspect; confirm against the library's custom map/intent.
    for i in map(pipe, *side_jobs):
        yield i[0]
allows you to run a function in a pipeline without affecting the data
def _extract_axes_for_slice(self, axes): return {self._AXIS_SLICEMAP[i]: a for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)}
Return the slice dictionary for these axes.
def check_exists_repositories(repo):
    """Check whether a repository exists via its package-list file.

    Returns "" when the repository is usable, or the repo name when its
    list file is missing.
    """
    pkg_list = "PACKAGES.TXT"
    if repo == "sbo":
        pkg_list = "SLACKBUILDS.TXT"
    if check_for_local_repos(repo) is True:
        # local repositories are assumed present — TODO confirm intent,
        # the pkg_list assignment here is otherwise unused
        pkg_list = "PACKAGES.TXT"
        return ""
    if not os.path.isfile("{0}{1}{2}".format(
            _meta_.lib_path, repo, "_repo/{0}".format(pkg_list))):
        return repo
    return ""
Check whether a repository exists by looking for its PACKAGES.TXT file.
def keyframe(self, index):
    """Set the current keyframe. Load TiffPage from file if necessary."""
    index = int(index)
    if index < 0:
        index %= len(self)
    if self._keyframe.index == index:
        return
    if index == 0:
        self._keyframe = self.pages[0]
        return
    if self._indexed or index < len(self.pages):
        page = self.pages[index]
        if isinstance(page, TiffPage):
            self._keyframe = page
            return
        if isinstance(page, TiffFrame):
            # keep only the file offset; the page is re-read as TiffPage below
            self.pages[index] = page.offset
    # temporarily force page loads to materialize as TiffPage
    tiffpage = self._tiffpage
    self._tiffpage = TiffPage
    try:
        self._keyframe = self._getitem(index)
    finally:
        self._tiffpage = tiffpage
    self.pages[index] = self._keyframe
Set current keyframe. Load TiffPage from file if necessary.
def multi_replace(instr, search_list=None, repl_list=None):
    """Apply a list of string replacements to *instr*, in order.

    :param instr: input string.
    :param search_list: substrings to search for; defaults to none.
    :param repl_list: replacements parallel to ``search_list``; defaults to
        empty strings (i.e. every match is deleted).
    :returns: the string with all replacements applied.
    """
    # fix: `search_list=[]` was a shared mutable default argument
    search_list = [] if search_list is None else search_list
    repl_list = [''] * len(search_list) if repl_list is None else repl_list
    for ser, repl in zip(search_list, repl_list):
        instr = instr.replace(ser, repl)
    return instr
Does a string replace with a list of search and replacements TODO: rename
def from_data(cls, data):
    """Create a gyroscope stream from an (N, 3) data array.

    :param data: (N, 3) ndarray of angular velocities (rad/s).
    :returns: new stream instance with ``data`` attached.
    :raises ValueError: if ``data`` does not have exactly 3 columns.
    """
    if data.shape[1] != 3:  # clearer than `not ... == 3`
        raise ValueError("Gyroscope data must have shape (N, 3)")
    instance = cls()
    instance.data = data
    return instance
Create gyroscope stream from data array Parameters ------------------- data : (N, 3) ndarray Data array of angular velocities (rad/s) Returns ------------------- GyroStream Stream object
def fetch_hg_push_log(repo_name, repo_url):
    """Run an HgPushlog ETL process for the given repository."""
    newrelic.agent.add_custom_parameter("repo_name", repo_name)
    process = HgPushlogProcess()
    # full=1/version=2 requests complete changeset metadata
    process.run(repo_url + '/json-pushes/?full=1&version=2', repo_name)
Run a HgPushlog etl process
def _update(self):
    """Update num_inst and sum_metric with per-class average precisions."""
    aps = []
    for k, v in self.records.items():
        recall, prec = self._recall_prec(v, self.counts[k])
        ap = self._average_precision(recall, prec)
        aps.append(ap)
        # per-class slots when a fixed class count is configured
        if self.num is not None and k < (self.num - 1):
            self.sum_metric[k] = ap
            self.num_inst[k] = 1
    if self.num is None:
        self.num_inst = 1
        self.sum_metric = np.mean(aps)
    else:
        # last slot holds the mean AP across classes
        self.num_inst[-1] = 1
        self.sum_metric[-1] = np.mean(aps)
update num_inst and sum_metric
def send(self, message):
    """Send a message to the Riemann server and return its response.

    :param message: protobuf message to send.
    :returns: the parsed response message.
    :raises RiemannError: if the server reports an error.
    """
    message = message.SerializeToString()
    # length-prefixed frame: 4-byte big-endian size, then the protobuf body
    self.socket.sendall(struct.pack('!I', len(message)) + message)
    length = struct.unpack('!I', self.socket.recv(4))[0]
    response = riemann_client.riemann_pb2.Msg()
    response.ParseFromString(socket_recvall(self.socket, length))
    if not response.ok:
        raise RiemannError(response.error)
    return response
Sends a message to a Riemann server and returns its response :param message: The message to send to the Riemann server :returns: The response message from Riemann :raises RiemannError: if the server returns an error
def environment_failure(self, error):
    """Log an environment-creation failure and shut the daemon down."""
    log.exception(
        'Failed to create environment for %s: %s',
        self.__class__.__name__,
        get_error_message(error)
    )
    self.shutdown(error)
Log environment failure for the daemon and exit with the error code. :param error: :return:
def get_process_token():
    """Return an all-access HANDLE for the current process token.

    :raises RuntimeError: if OpenProcessToken fails.
    """
    token = wintypes.HANDLE()
    res = process.OpenProcessToken(
        process.GetCurrentProcess(), process.TOKEN_ALL_ACCESS, token)
    if not res > 0:
        raise RuntimeError("Couldn't get process token")
    return token
Get the current process token
def sociallogin_from_response(self, request, response):
    """Instantiate and populate an unsaved SocialLogin from `response`.

    Extraction is delegated to the provider's extract_* hooks; nothing is
    written to the database.
    """
    from allauth.socialaccount.models import SocialLogin, SocialAccount
    adapter = get_adapter(request)
    uid = self.extract_uid(response)
    extra_data = self.extract_extra_data(response)
    common_fields = self.extract_common_fields(response)
    socialaccount = SocialAccount(extra_data=extra_data,
                                  uid=uid,
                                  provider=self.id)
    email_addresses = self.extract_email_addresses(response)
    self.cleanup_email_addresses(common_fields.get('email'),
                                 email_addresses)
    sociallogin = SocialLogin(account=socialaccount,
                              email_addresses=email_addresses)
    user = sociallogin.user = adapter.new_user(request, sociallogin)
    # social logins authenticate via the provider, never via password
    user.set_unusable_password()
    adapter.populate_user(request, sociallogin, common_fields)
    return sociallogin
Instantiates and populates a `SocialLogin` model based on the data retrieved in `response`. The method does NOT save the model to the DB. Data for `SocialLogin` will be extracted from `response` with the help of the `.extract_uid()`, `.extract_extra_data()`, `.extract_common_fields()`, and `.extract_email_addresses()` methods. :param request: a Django `HttpRequest` object. :param response: object retrieved via the callback response of the social auth provider. :return: A populated instance of the `SocialLogin` model (unsaved).
def contains(self, other):
    """Return True if `other` is a subsequence of this VariantSequence.

    Requires identical alt nucleotides, this prefix ending with the other's
    prefix, and this suffix starting with the other's suffix.
    """
    if self.alt != other.alt:
        return False
    return (self.prefix.endswith(other.prefix)
            and self.suffix.startswith(other.suffix))
Is the other VariantSequence a subsequence of this one? The two sequences must agree on the alt nucleotides, the prefix of the longer must contain the prefix of the shorter, and the suffix of the longer must contain the suffix of the shorter.
def add_lv_load_area_group(self, lv_load_area_group):
    """Append the LV load-area group to the internal list unless present."""
    known_groups = self.lv_load_area_groups()
    if lv_load_area_group in known_groups:
        return
    self._lv_load_area_groups.append(lv_load_area_group)
Adds a LV load_area to _lv_load_areas if not already existing.
def debug(self, request, message, extra_tags='', fail_silently=False):
    """Add a message with the ``DEBUG`` level."""
    add(self.target_name, request, constants.DEBUG, message,
        extra_tags=extra_tags, fail_silently=fail_silently)
Add a message with the ``DEBUG`` level.
def create_linked_data_element_from_resource(self, resource):
    """Return a new linked data element for the given resource object.

    :returns: object implementing ILinkedDataElement.
    """
    mp = self.__mp_reg.find_or_create_mapping(Link)
    return mp.data_element_class.create_from_resource(resource)
Returns a new linked data element for the given resource object. :returns: object implementing :class:`ILinkedDataElement`.
def isPackage(self, dotted_name, extrapath=None):
    """Return the package name if ``dotted_name`` is a package, else falsy."""
    suffix = '.__init__'
    found = self.isModule(dotted_name + suffix, extrapath)
    if not found:
        return found
    return found[:-len(suffix)]
Is ``dotted_name`` the name of a package?
def _set_alarm(self, status, home_id):
    """PUT the new alarm status for `home_id`; return True on success."""
    response = self._request(
        MINUT_HOMES_URL + "/{}".format(home_id),
        request_type='PUT',
        json={'alarm_status': status})
    # the API echoes the status back when the change was applied
    return response.get('alarm_status', '') == status
Set alarm status.
def on_change(self, *callbacks):
    """Register change callbacks, skipping any already registered."""
    for callback in callbacks:
        if callback in self._callbacks:
            continue
        # each callback must accept a single 'event' argument
        _check_callback(callback, ('event',))
        self._callbacks[callback] = callback
Provide callbacks to invoke if the document or any Model reachable from its roots changes.
def ismount(self, path):
    """Return True if `path` is a mount point of the fake filesystem.

    Under Windows also returns True for drive and UNC roots, independent
    of their existence.
    """
    path = make_string_path(path)
    if not path:
        return False
    normed_path = self.filesystem.absnormpath(path)
    sep = self.filesystem._path_separator(path)
    if self.filesystem.is_windows_fs:
        if self.filesystem.alternative_path_separator is not None:
            path_seps = (
                sep, self.filesystem._alternative_path_separator(path)
            )
        else:
            path_seps = (sep, )
        drive, rest = self.filesystem.splitdrive(normed_path)
        # a drive root like "C:\" (or UNC root) counts as a mount point
        if drive and drive[:1] in path_seps:
            return (not rest) or (rest in path_seps)
        if rest in path_seps:
            return True
    for mount_point in self.filesystem.mount_points:
        if normed_path.rstrip(sep) == mount_point.rstrip(sep):
            return True
    return False
Return true if the given path is a mount point. Args: path: Path to filesystem object to be checked Returns: `True` if path is a mount point added to the fake file system. Under Windows also returns True for drive and UNC roots (independent of their existence).
def parse_literal(x):
    """Parse a string (or nested list of strings) into int, float or str.

    Tries int first, then float, falling back to the original value.
    Lists are processed recursively, preserving structure.

    :raises TypeError: if the input is not a string, bytes or list.
    """
    if isinstance(x, list):
        return [parse_literal(item) for item in x]
    if not isinstance(x, (bytes, str)):
        raise TypeError('input must be a string or a list of strings')
    for converter in (int, float):
        try:
            return converter(x)
        except ValueError:
            continue
    return x
return the smallest possible data type for a string or list of strings Parameters ---------- x: str or list a string to be parsed Returns ------- int, float or str the parsing result Examples -------- >>> isinstance(parse_literal('1.5'), float) True >>> isinstance(parse_literal('1'), int) True >>> isinstance(parse_literal('foobar'), str) True
def get(self, type: Type[T], query: Mapping[str, Any]) -> T:
    """Run `query` through the source handlers able to provide `type`.

    Handler lists are memoized per type (None is cached to remember that
    no source can provide it). Raises NoConversionError when no source
    provides the type, NotFoundError when every handler misses.
    """
    LOGGER.info("Getting SourceHandlers for \"{type}\"".format(type=type.__name__))
    try:
        handlers = self._get_types[type]
    except KeyError:
        try:
            LOGGER.info("Building new SourceHandlers for \"{type}\"".format(type=type.__name__))
            handlers = self._get_handlers(type)
        except NoConversionError:
            # cache the negative result so we don't rebuild every call
            handlers = None
        self._get_types[type] = handlers
    if handlers is None:
        raise NoConversionError("No source can provide \"{type}\"".format(type=type.__name__))
    LOGGER.info("Creating new PipelineContext")
    context = self._new_context()
    LOGGER.info("Querying SourceHandlers for \"{type}\"".format(type=type.__name__))
    for handler in handlers:
        try:
            return handler.get(query, context)
        except NotFoundError:
            pass
    raise NotFoundError("No source returned a query result!")
Gets a query from the data pipeline. 1) Extracts the query the sequence of data sources. 2) Inserts the result into the data sinks (if appropriate). 3) Transforms the result into the requested type if it wasn't already. 4) Inserts the transformed result into any data sinks. Args: query: The query being requested. context: The context for the extraction (mutable). Returns: The requested object.
def count_samples(ns_run, **kwargs):
    r"""Return the number of samples in a nested sampling run.

    Unlike most estimators this does not require log weights, but for
    convenience accepts (and discards) the 'logw' and 'simulate' kwargs.

    :param ns_run: nested sampling run dict containing a 'logl' array.
    :returns: number of samples (int).
    :raises TypeError: if unexpected keyword arguments are supplied.
    """
    kwargs.pop('logw', None)
    kwargs.pop('simulate', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    return ns_run['logl'].shape[0]
r"""Number of samples in run. Unlike most estimators this does not require log weights, but for convenience will not throw an error if they are specified. Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). Returns ------- int
def _get(self, key): self._populate_cache() if key not in self._cache: raise AttributeError("DataField has no member {}".format(key)) return self._cache[key]
Return given key from cache.
def stop_monitoring(self) -> None:
    """Mark the optimisation finished and run the monitoring tasks once more.

    Called automatically when the monitor is used as a context manager;
    prints the timing summary if the monitor was created with that option.
    """
    self._context.optimisation_finished = True
    self._on_iteration()
    if self._print_summary:
        self.print_summary()
The recommended way of using Monitor is opening it with the `with` statement. In this case the user doesn't need to call this function explicitly. Otherwise the function should be called when the optimisation is done. The function sets the optimisation completed flag in the monitoring context and runs the tasks once more. If the monitor was created with the `print_summary` option it prints the tasks' timing summary.
def arch(self):
    """Return this task's architecture string (eg "noarch"), or None."""
    if self.method == 'indirectionimage':
        return self.params[0]['arch']
    # each method stores the arch at a fixed position in params
    param_index = {
        'buildArch': 2,
        'createdistrepo': 2,
        'livecd': 2,
        'createrepo': 1,
        'runroot': 1,
        'createImage': 3,
    }.get(self.method)
    if param_index is not None:
        return self.params[param_index]
Return an architecture for this task. :returns: an arch string (eg "noarch", or "ppc64le"), or None this task has no architecture associated with it.
def find_existing(self):
    """Diff intended secgroup rules against the current remote rules.

    Populates ``self.create_these_rules`` with missing rules and
    ``self.delete_these_rules`` with remote rules not in the intent.
    """
    sg = self.consul.find_secgroup(self.name)
    current = sg.rules
    log.debug('Current rules: %s' % current)
    log.debug('Intended rules: %s' % self.rules)
    exp_rules = []
    for rule in self.rules:
        exp = (
            rule[A.secgroup.PROTOCOL],
            rule[A.secgroup.FROM],
            rule[A.secgroup.TO],
            rule[A.secgroup.SOURCE],
        )
        exp_rules.append(exp)
        if exp in current:
            # present remotely: drop it from the deletion candidates
            del current[exp]
        else:
            self.create_these_rules.append(exp)
    # NOTE(review): `itervalues()` is Python 2 only — port to `.values()`
    # if this module ever targets Python 3.
    self.delete_these_rules.extend(current.itervalues())
    log.debug('Create these rules: %s' % self.create_these_rules)
    log.debug('Delete these rules: %s' % self.delete_these_rules)
Finds existing rule in secgroup. Populates ``self.create_these_rules`` and ``self.delete_these_rules``.
def _build_callback(self, config):
    """Apply plugins to a route callback and return the wrapped callable."""
    wrapped = config['callback']
    plugins = self.plugins + config['apply']
    skip = config['skip']
    try:
        for plugin in reversed(plugins):
            if True in skip:
                # literal True in skip disables all plugins for this route
                break
            if plugin in skip or type(plugin) in skip:
                continue
            if getattr(plugin, 'name', True) in skip:
                continue
            if hasattr(plugin, 'apply'):
                wrapped = plugin.apply(wrapped, config)
            else:
                wrapped = plugin(wrapped)
            if not wrapped:
                break
        functools.update_wrapper(wrapped, config['callback'])
        return wrapped
    except RouteReset:
        # a plugin requested a rebuild; start over from the raw callback
        return self._build_callback(config)
Apply plugins to a route and return a new callable.
def signature(self):
    """Return call-signature info for the element at the cursor, or []."""
    iexec, execmod = self.context.parser.tree_find(self.context.el_name,
                                                   self.context.module,
                                                   "executables")
    if iexec is None:
        # fall back to interfaces when no executable matches
        iexec, execmod = self.context.parser.tree_find(self.context.el_name,
                                                       self.context.module,
                                                       "interfaces")
        if iexec is None:
            return []
    return self._signature_index(iexec)
Gets completion or call signature information for the current cursor.
def long_to_bytes(N, blocksize=1):
    """Return the big-endian byte representation of the integer ``N``.

    If ``blocksize`` is greater than 1 the result is left-padded with
    zero bytes so its length is a multiple of ``blocksize``.

    :param N: non-negative integer to convert.
    :param blocksize: pad the output to a multiple of this many bytes.
    :returns: bytes representation of ``N``.
    """
    bytestring = hex(N)
    bytestring = bytestring[2:] if bytestring.startswith('0x') else bytestring
    # Python 2 longs carried a trailing 'L'
    bytestring = bytestring[:-1] if bytestring.endswith('L') else bytestring
    bytestring = '0' + bytestring if (len(bytestring) % 2) != 0 else bytestring
    bytestring = binascii.unhexlify(bytestring)
    if blocksize > 0 and len(bytestring) % blocksize != 0:
        # fix: pad with zero *bytes* — prepending the str '\x00' to a bytes
        # object raises TypeError on Python 3
        bytestring = b'\x00' * \
            (blocksize - (len(bytestring) % blocksize)) + bytestring
    return bytestring
Given an input integer ``N``, ``long_to_bytes`` returns the representation of ``N`` in bytes. If ``blocksize`` is greater than ``1`` then the output string will be right justified and then padded with zero-bytes, such that the return values length is a multiple of ``blocksize``.
def wait_for_and_dismiss_alert(driver, timeout=settings.LARGE_TIMEOUT):
    """Wait for an alert, dismiss it, and return its text.

    @Params
    driver - the webdriver object (required)
    timeout - the time to wait for the alert in seconds
    """
    alert = wait_for_and_switch_to_alert(driver, timeout)
    # capture the text before dismissing; it is unavailable afterwards
    alert_text = alert.text
    alert.dismiss()
    return alert_text
Wait for and dismiss an alert. Returns the text from the alert. @Params driver - the webdriver object (required) timeout - the time to wait for the alert in seconds
def update_reading_events(readings, event_record):
    """Rewrite the readings in a 400-row's interval with its event details."""
    # intervals are 1-based, hence the -1 on the start index
    for i in range(event_record.start_interval - 1, event_record.end_interval):
        readings[i] = Reading(
            t_start=readings[i].t_start,
            t_end=readings[i].t_end,
            read_value=readings[i].read_value,
            uom=readings[i].uom,
            quality_method=event_record.quality_method,
            event_code=event_record.reason_code,
            event_desc=event_record.reason_description,
            read_start=readings[i].read_start,
            read_end=readings[i].read_end)
    return readings
Updates readings from a 300 row to reflect any events found in a subsequent 400 row
def _data(self):
    """Return a deep-copy NDArray of this sparse array's data (blocking)."""
    # block until any pending asynchronous writes have completed
    self.wait_to_read()
    hdl = NDArrayHandle()
    check_call(_LIB.MXNDArrayGetDataNDArray(self.handle, ctypes.byref(hdl)))
    return NDArray(hdl)
A deep copy NDArray of the data array associated with the BaseSparseNDArray. This function blocks. Do not use it in performance critical code.
def up(ctx, instance_id):
    """Start the EC2 instance with the given id; exit(2) on an invalid id."""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    ec2 = session.resource('ec2')
    try:
        instance = ec2.Instance(instance_id)
        instance.start()
    except botocore.exceptions.ClientError as e:
        click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
        sys.exit(2)
Start EC2 instance
def validate_candidates(self):
    """Validate current candidates and replace them with the validated set.

    In multi-environment mode only candidates accepted by every slave
    environment are kept.
    """
    async def slave_task(addr, candidates):
        # ask a remote slave environment to validate the candidate list
        r_manager = await self.env.connect(addr)
        return await r_manager.validate_candidates(candidates)

    self._log(logging.DEBUG, "Validating {} candidates"
              .format(len(self.candidates)))
    candidates = self.candidates
    if self._single_env:
        self._candidates = self.env.validate_candidates(candidates)
    else:
        mgrs = self.get_managers()
        tasks = create_tasks(slave_task, mgrs, candidates, flatten=False)
        rets = run(tasks)
        # keep only candidates accepted by every environment
        valid_candidates = set(self.candidates)
        for r in rets:
            valid_candidates = valid_candidates.intersection(set(r))
        self._candidates = list(valid_candidates)
    self._log(logging.DEBUG, "{} candidates after validation"
              .format(len(self.candidates)))
Validate current candidates. This method validates the current candidate list in all the agents in the environment (or underlying slave environments) and replaces the current :attr:`candidates` with the list of validated candidates. The artifact candidates must be hashable and have a :meth:`__eq__` implemented for validation to work on multi-environments and distributed environments.
def get_objective_search_session(self):
    """Return an ObjectiveSearchSession per the OSID contract.

    :raises Unimplemented: if supports_objective_search() is false.
    :raises OperationFailed: if the session cannot be constructed.
    """
    if not self.supports_objective_search():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    try:
        session = sessions.ObjectiveSearchSession(runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
    return session
Gets the OsidSession associated with the objective search service. return: (osid.learning.ObjectiveSearchSession) - an ObjectiveSearchSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_objective_search() is false compliance: optional - This method must be implemented if supports_objective_search() is true.
def list(self, entity=None):
    """Return list data, optionally filtered to a single entity."""
    uri = "/%s" % self.uri_base
    if entity:
        uri = "%s?entityId=%s" % (uri, utils.get_id(entity))
    resp, resp_body = self._list(uri, return_raw=True)
    return resp_body
Returns a dictionary of data, optionally filtered for a given entity.
def _WaitForStartup(self, deadline):
    """Poll the emulator until it responds or `deadline` seconds elapse.

    Args:
      deadline: deadline in seconds.
    Returns:
      True if the emulator responded in time, False otherwise.
    """
    start = time.time()
    sleep = 0.05

    def Elapsed():
        return time.time() - start

    while True:
        try:
            response, _ = self._http.request(self._host)
            if response.status == 200:
                logging.info('emulator responded after %f seconds', Elapsed())
                return True
        except (socket.error, httplib.ResponseNotReady):
            pass
        if Elapsed() >= deadline:
            return False
        else:
            # exponential back-off between polls
            time.sleep(sleep)
            sleep *= 2
Waits for the emulator to start. Args: deadline: deadline in seconds Returns: True if the emulator responds within the deadline, False otherwise.
def main(config, host, port, logfile, debug, daemon, uid, gid, pidfile, umask,
         rundir):
    """Command-line entry point: forward every CLI option to _main."""
    # locals() here is exactly the set of CLI parameters
    _main(**locals())
Main entry point for running a socket server from the commandline. This method will read in options from the commandline and call the L{config.init_config} method to get everything setup. Then, depending on whether deamon mode was specified or not, the process may be forked (or not) and the server will be started.
def getAvailableTemplates(self):
    """Return available sticker templates as dicts (id, title, selected)."""
    try:
        adapters = getAdapters((self.context, ), IGetStickerTemplates)
    except ComponentLookupError:
        logger.info("No IGetStickerTemplates adapters found.")
        adapters = None
    templates = []
    if adapters is not None:
        for name, adapter in adapters:
            templates += adapter(self.request)
    if templates:
        # adapters fully determine the template list when they provide any
        return templates
    seltemplate = self.getSelectedTemplate()
    for temp in getStickerTemplates(filter_by_type=self.filter_by_type):
        out = temp
        out["selected"] = temp.get("id", "") == seltemplate
        templates.append(out)
    return templates
Returns an array with the templates of stickers available. Each array item is a dictionary with the following structure: {'id': <template_id>, 'title': <teamplate_title>, 'selected: True/False'}
def available_cpu_count() -> int:
    """Return the number of CPUs available to this process.

    Tries, in order: the Cpus_allowed mask in /proc/self/status,
    psutil.cpu_count(), os.sysconf, and /proc/cpuinfo.

    :raises Exception: if every strategy fails.
    """
    try:
        # affinity-aware: count the set bits of the allowed-CPU bitmask
        match = re.search(r'(?m)^Cpus_allowed:\s*(.*)$',
                          open('/proc/self/status').read())
        if match:
            res = bin(int(match.group(1).replace(',', ''), 16)).count('1')
            if res > 0:
                return res
    except IOError:
        LOG.debug("Could not get the number of allowed CPUs")
    try:
        import psutil
        return psutil.cpu_count()
    except (ImportError, AttributeError):
        LOG.debug("Could not get the number of allowed CPUs")
    try:
        res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
        if res > 0:
            return res
    except (AttributeError, ValueError):
        LOG.debug("Could not get the number of allowed CPUs")
    try:
        res = open('/proc/cpuinfo').read().count('processor\t:')
        if res > 0:
            return res
    except IOError:
        LOG.debug("Could not get the number of allowed CPUs")
    raise Exception('Can not determine number of CPUs on this system')
Get the number of available CPUs. Number of available virtual or physical CPUs on this system, i.e. user/real as output by time(1) when called with an optimally scaling userspace-only program. Returns: Number of avaialable CPUs.
def instruction_ROL_register(self, opcode, register):
    """Rotate the value in `register` left by one bit (delegates to self.ROL)."""
    rotated = self.ROL(register.value)
    register.set(rotated)
Rotate the given register left
def get_params(names):
    """Return a dict of the named parameters found on the current request."""
    params = {}
    for name in names:
        # form fields take precedence over uploaded files
        value = request.form.get(name) or request.files.get(name)
        if value is not None:
            params[name] = value
    return params
Return a dictionary with params from request. TODO: I think we don't use it anymore and it should be removed before someone gets hurt.
def parse(self, src):
    """Parse a CSS source string using the current cssBuilder."""
    self.cssBuilder.beginStylesheet()
    try:
        src = cssSpecial.cleanupCSS(src)
        try:
            src, stylesheet = self._parseStylesheet(src)
        except self.ParseError as err:
            # attach the full source for better error reporting
            err.setFullCSSSource(src)
            raise
    finally:
        self.cssBuilder.endStylesheet()
    return stylesheet
Parses CSS string source using the current cssBuilder. Use for embedded stylesheets.
def lan(self, move: Move) -> str:
    """Return the long algebraic notation of `move` in the current position."""
    return self._algebraic(move, long=True)
Gets the long algebraic notation of the given move in the context of the current position.
def unshift(self, chunk):
    """Push already-read data back onto the internal buffer.

    Lets a consumer "un-consume" bytes it optimistically pulled from the
    stream; empty chunks are ignored.
    """
    if not chunk:
        return
    self._pos -= len(chunk)
    self._unconsumed.append(chunk)
Pushes a chunk of data back into the internal buffer. This is useful in certain situations where a stream is being consumed by code that needs to "un-consume" some amount of data that it has optimistically pulled out of the source, so that the data can be passed on to some other party.
def _parse():
    """Parse the Fortran source named in `args`, honoring the reparse flag."""
    if not args["reparse"]:
        settings.use_filesystem_cache = False
    c = CodeParser()
    if args["verbose"]:
        c.verbose = True
    if args["reparse"]:
        c.reparse(args["source"])
    else:
        c.parse(args["source"])
    return c
Parses the specified Fortran source file from which the wrappers will be constructed for ctypes.
def predict(self, inputs: np.ndarray) -> np.ndarray:
    """Run the session on a batch of inputs and return the outputs."""
    return self.sess.run(self.out_var, {self.inp_var: inputs})
Run on multiple inputs
def was_modified_since(header=None, mtime=0, size=0):
    """Return True unless the If-Modified-Since header proves otherwise.

    :param header: value of the ``If-Modified-Since`` header (None -> True).
    :param mtime: modification time of the item in question.
    :param size: size of the item.
    """
    header_mtime = modified_since(header, size)
    return not (header_mtime and header_mtime <= mtime)
Check if an item was modified since the user last downloaded it :param header: the value of the ``If-Modified-Since`` header. If this is ``None``, simply return ``True`` :param mtime: the modification time of the item in question. :param size: the size of the item.
def _close_window(self, window): if window == self.active_window: self.close_active_window() else: original_active_window = self.active_window self.close_active_window() self.active_window = original_active_window
Close this window.
def _find_global(self, module, func):
    """Return the whitelisted callable for `module`/`func` during unpickling.

    Raises StreamError for anything outside the whitelist so untrusted
    pickles cannot instantiate arbitrary classes.
    """
    if module == __name__:
        if func == '_unpickle_call_error' or func == 'CallError':
            return _unpickle_call_error
        elif func == '_unpickle_sender':
            return self._unpickle_sender
        elif func == '_unpickle_context':
            return self._unpickle_context
        elif func == 'Blob':
            return Blob
        elif func == 'Secret':
            return Secret
        elif func == 'Kwargs':
            return Kwargs
    elif module == '_codecs' and func == 'encode':
        # Python 2/3 interop: bytes are pickled via _codecs.encode
        return self._unpickle_bytes
    elif module == '__builtin__' and func == 'bytes':
        return BytesType
    raise StreamError('cannot unpickle %r/%r', module, func)
Return the class implementing `module_name.class_name` or raise `StreamError` if the module is not whitelisted.
def _index_document(self, document, force=False):
    """Add a dataset document to the index.

    NOTE(review): the SQL literal inside text() appears to have been lost
    (the call is empty) — restore the indexing statement before use; the
    `force` parameter is currently unused.
    """
    query = text(
    )
    self.execute(query, **document)
Adds dataset document to the index.
def save(self, path):
    """Write the entire source video (with audio) to `path`.

    Note: saves the whole source clip, not just currently selected frames.
    """
    self.clip.write_videofile(path, audio_fps=self.clip.audio.fps)
Save source video to file. Args: path (str): Filename to save to. Notes: Saves entire source video to file, not just currently selected frames.
def _check_authentication(self, request, request_args, request_kwargs):
    """Verify the request's JWT; return (is_valid, status, reasons).

    :raises Unauthorized: on verification errors (re-raises the original
        exception first when the app is in debug mode).
    """
    try:
        is_valid, status, reasons = self._verify(
            request,
            request_args=request_args,
            request_kwargs=request_kwargs,
        )
    except Exception as e:
        logger.debug(e.args)
        if self.config.debug():
            raise e
        # only propagate structured args for known JWT exceptions
        args = e.args if isinstance(e, SanicJWTException) else []
        raise exceptions.Unauthorized(*args)
    return is_valid, status, reasons
Checks a request object to determine if that request contains a valid, and authenticated JWT. It returns a tuple: 1. Boolean whether the request is authenticated with a valid JWT 2. HTTP status code 3. Reasons (if any) for a potential authentication failure
def recruit(self):
    """Recruit participants as needed; close recruitment once networks fill.

    Runs whenever a participant successfully completes the experiment.
    """
    if not self.networks(full=False):
        self.log("All networks full: closing recruitment", "-----")
        self.recruiter.close_recruitment()
Recruit participants to the experiment as needed. This method runs whenever a participant successfully completes the experiment (participants who fail to finish successfully are automatically replaced). By default it recruits 1 participant at a time until all networks are full.
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):
    """Sort a batch-first tensor by descending sequence length.

    Returns (sorted_tensor, sorted_sequence_lengths, restoration_indices,
    permutation_index). Applying index_select with restoration_indices to
    the sorted tensor recovers the original order.
    """
    if not (isinstance(tensor, torch.Tensor)
            and isinstance(sequence_lengths, torch.Tensor)):
        raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.")
    sorted_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
    sorted_tensor = tensor.index_select(0, permutation_index)
    # invert the permutation to obtain the restoration indices
    positions = torch.arange(0, len(sequence_lengths),
                             device=sequence_lengths.device)
    _, inverse_order = permutation_index.sort(0, descending=False)
    restoration_indices = positions.index_select(0, inverse_order)
    return sorted_tensor, sorted_lengths, restoration_indices, permutation_index
Sort a batch first tensor by some specified lengths. Parameters ---------- tensor : torch.FloatTensor, required. A batch first Pytorch tensor. sequence_lengths : torch.LongTensor, required. A tensor representing the lengths of some dimension of the tensor which we want to sort by. Returns ------- sorted_tensor : torch.FloatTensor The original tensor sorted along the batch dimension with respect to sequence_lengths. sorted_sequence_lengths : torch.LongTensor The original sequence_lengths sorted by decreasing size. restoration_indices : torch.LongTensor Indices into the sorted_tensor such that ``sorted_tensor.index_select(0, restoration_indices) == original_tensor`` permutation_index : torch.LongTensor The indices used to sort the tensor. This is useful if you want to sort many tensors using the same ordering.
def _sim_texture(r1, r2): return sum([min(a, b) for a, b in zip(r1["hist_t"], r2["hist_t"])])
calculate the sum of histogram intersection of texture
def snappy_encode(payload, xerial_compatible=False, xerial_blocksize=32 * 1024):
    """Snappy-compress `payload`, optionally in xerial block format.

    :param payload: bytes to compress.
    :param xerial_compatible: emit header + length-prefixed snappy blocks.
    :param xerial_blocksize: bytes per independently-compressed chunk.
    :raises NotImplementedError: if the snappy codec is unavailable.
    """
    if not has_snappy():
        raise NotImplementedError("Snappy codec is not available")
    if xerial_compatible:
        def _chunker():
            for i in range(0, len(payload), xerial_blocksize):
                yield payload[i:i+xerial_blocksize]

        out = BytesIO()
        out.write(_XERIAL_HEADER)
        # each block: big-endian int32 length prefix, then snappy bytes
        for chunk in _chunker():
            block = snappy.compress(chunk)
            out.write(struct.pack('!i', len(block)))
            out.write(block)
        out.seek(0)
        return out.read()
    else:
        return snappy.compress(payload)
Compress the given data with the Snappy algorithm. :param bytes payload: Data to compress. :param bool xerial_compatible: If set then the stream is broken into length-prefixed blocks in a fashion compatible with the xerial snappy library. The format winds up being:: +-------------+------------+--------------+------------+--------------+ | Header | Block1_len | Block1 data | BlockN len | BlockN data | |-------------+------------+--------------+------------+--------------| | 16 bytes | BE int32 | snappy bytes | BE int32 | snappy bytes | +-------------+------------+--------------+------------+--------------+ :param int xerial_blocksize: Number of bytes per chunk to independently Snappy encode. 32k is the default in the xerial library. :returns: Compressed bytes. :rtype: :class:`bytes`
def _trim_zeros_float(str_floats, na_rep='NaN'): trimmed = str_floats def _is_number(x): return (x != na_rep and not x.endswith('inf')) def _cond(values): finite = [x for x in values if _is_number(x)] return (len(finite) > 0 and all(x.endswith('0') for x in finite) and not (any(('e' in x) or ('E' in x) for x in finite))) while _cond(trimmed): trimmed = [x[:-1] if _is_number(x) else x for x in trimmed] return [x + "0" if x.endswith('.') and _is_number(x) else x for x in trimmed]
Trims zeros, leaving just one before the decimal points if need be.
def SendSerializedMessage(self, message):
    """Serialize `message`, hex-decode it and write it to the transport.

    Args:
        message (neo.Network.Message): message to send to the remote client.
    """
    try:
        ba = Helper.ToArray(message)
        ba2 = binascii.unhexlify(ba)
        self.bytes_out += len(ba2)
        self.transport.write(ba2)
    except Exception as e:
        # best-effort send: failures are logged, not raised
        logger.debug(f"Could not send serialized message {e}")
Send the `message` to the remote client. Args: message (neo.Network.Message):
def async_do(self, size=10):
    """Run all queued asynchronous jobs and wait for them to finish.

    :param size: number of threads to run on (default 10).
    """
    if hasattr(self._session, '_async_jobs'):
        logging.info("Executing asynchronous %s jobs found in queue by using %s threads..." % (
            len(self._session._async_jobs), size))
        threaded_requests.map(self._session._async_jobs, size=size)
Execute all asynchronous jobs and wait for them to finish. By default it will run on 10 threads. :param size: number of threads to run on.
def designspace(self):
    """Return the designspace Document linking masters and instance data.

    Built lazily on first access; later calls return the cached document.
    """
    if self._designspace_is_complete:
        return self._designspace
    self._designspace_is_complete = True
    # force evaluation of masters before deriving the rest
    list(self.masters)
    self.to_designspace_axes()
    self.to_designspace_sources()
    self.to_designspace_instances()
    self.to_designspace_family_user_data()
    if self.bracket_layers:
        self._apply_bracket_layers()
    base_family = self.family_name or "Unnamed"
    base_style = find_base_style(self.font.masters)
    if base_style:
        base_style = "-" + base_style
    name = (base_family + base_style).replace(" ", "") + ".designspace"
    self.designspace.filename = name
    return self._designspace
Get a designspace Document instance that links the masters together and holds instance data.
def __assert_false(returned):
    """Return "Pass" if `returned` is False, else a "Fail: ..." message."""
    result = "Pass"
    if isinstance(returned, str):
        # NOTE(review): bool(<str>) never raises ValueError and is True for
        # any non-empty string — this likely intended to parse "False";
        # confirm the desired coercion.
        try:
            returned = bool(returned)
        except ValueError:
            raise
    try:
        assert (returned is False), "{0} not False".format(returned)
    except AssertionError as err:
        result = "Fail: " + six.text_type(err)
    return result
Test if an boolean is False
def import_module(module_fqname, superclasses=None):
    """Import ``module_fqname`` and return a list of classes it defines.

    Only classes whose ``__module__`` matches the imported module are
    returned. If ``superclasses`` is given (a class or tuple of classes,
    as accepted by ``issubclass``), the result is restricted to its
    subclasses.
    """
    short_name = module_fqname.rpartition(".")[-1]
    module = __import__(module_fqname, globals(), locals(), [short_name])
    classes = [
        cls for _, cls in inspect.getmembers(module, inspect.isclass)
        if cls.__module__ == module_fqname
    ]
    if superclasses:
        classes = [cls for cls in classes if issubclass(cls, superclasses)]
    return classes
Imports the module module_fqname and returns a list of defined classes from that module. If superclasses is defined then the classes returned will be subclasses of the specified superclass or superclasses. If superclasses is plural it must be a tuple of classes.
def cancel(self):
    """Cancel the underlying operation unless it has already finished.

    Returns False when the operation is already done; otherwise asks the
    operations client to cancel it and returns True.
    """
    if not self.done():
        self._client.cancel_operation(self._operation.name)
        return True
    return False
If last Operation's value of `done` is true, returns false; otherwise, issues OperationsClient.cancel_operation and returns true.
def on_message(self, message):
    """Process a received message addressed to this module.

    Channel-name part messages go to the name handler with their part
    number, module-type messages to the type handler, and everything
    else to the subclass hook ``_on_message``.
    """
    if message.address != self._address:
        return
    name_part_classes = (
        (1, (velbus.ChannelNamePart1Message, velbus.ChannelNamePart1Message2)),
        (2, (velbus.ChannelNamePart2Message, velbus.ChannelNamePart2Message2)),
        (3, (velbus.ChannelNamePart3Message, velbus.ChannelNamePart3Message2)),
    )
    for part, classes in name_part_classes:
        if isinstance(message, classes):
            self._process_channel_name_message(part, message)
            return
    if isinstance(message, velbus.ModuleTypeMessage):
        self._process_module_type_message(message)
    else:
        self._on_message(message)
Process received message
def workspace_cli(ctx, directory, mets_basename, backup):
    """Attach a WorkspaceCtx for the given directory to the click context."""
    workspace_dir = os.path.abspath(directory)
    ctx.obj = WorkspaceCtx(workspace_dir, mets_basename, automatic_backup=backup)
Working with workspace
def sync_in(self, force=False):
    """Synchronize from files to records, and records to objects.

    Walks the source filesystem; every recognized file that exists and is
    newer than its record (or all recognized files when ``force`` is
    true) is read into its record and then into objects. Unknown files
    are skipped with a warning. Finally the bundle is re-indexed.
    """
    self.log('---- Sync In ----')
    self.dstate = self.STATES.BUILDING
    for path_name in self.source_fs.listdir():
        f = self.build_source_files.instance_from_name(path_name)
        if not f:
            self.warn('Ignoring unknown file: {}'.format(path_name))
            continue
        if f and f.exists and (f.fs_is_newer or force):
            self.log('Sync: {}'.format(f.record.path))
            f.fs_to_record()
            f.record_to_objects()
    # NOTE(review): commit placement relative to the loop is inferred from
    # the flattened source — confirm against upstream history.
    self.commit()
    self.library.search.index_bundle(self, force=True)
Synchronize from files to records, and records to objects
def execute_replay() -> None:
    """Execute all queued replay commands.

    For every command file found in ``replay/toDo`` (oldest first), run
    the command it contains and move the file to ``replay/archive``.
    Logs a warning when there is nothing to replay.
    """
    files = glob.glob('./replay/toDo/*')
    sorted_files = sorted(files, key=os.path.getctime)
    # Bug fix: the original tested `if not sorted_files`, which "replayed"
    # only when the queue was EMPTY and skipped all real work otherwise.
    if sorted_files:
        LOG.debug('Found %s, beginning execution.', sorted_files)
        for command_file in sorted_files:
            with open(command_file, 'r') as command:
                cmd = command.read()
                LOG.debug('executing command: %s', cmd)
                resp = run([cmd, '-v', 'DEBUG'], shell=True, check=True)
                LOG.debug(resp)
                LOG.debug('moving %s to archive', command.name)
                move_command = 'mv {0} ./replay/archive/'.format(command.name)
                run(move_command, shell=True, check=True)
        LOG.info('LaunchDarkly is now up to date.')
    else:
        LOG.warning('No files found, nothing to replay.')
Execute all queued commands: for every command file found in replay/toDo, run the command it contains and then move the file to the replay/archive directory.
def posterior_samples(self, X, size=10, full_cov=False, Y_metadata=None, likelihood=None, **predict_kwargs):
    """Sample the posterior GP at the points X.

    Equivalent to ``posterior_samples_f`` because this model has no
    likelihood; ``Y_metadata`` and ``likelihood`` are accepted only for
    interface compatibility and are ignored.
    """
    return self.posterior_samples_f(X, size, full_cov=full_cov, **predict_kwargs)
Samples the posterior GP at the points X, equivalent to posterior_samples_f due to the absence of a likelihood.
def check_mapping(self, m):
    """Check that a cached mapping has the minimum required fields.

    Contents are not validated. Internal (underscore-prefixed) fields may
    omit ``doc_values``; it is then defaulted to False in place.
    """
    if 'name' not in m:
        self.pr_dbg("Missing %s" % "name")
        return False
    for required in ('analyzed', 'indexed', 'type', 'scripted', 'count'):
        if required not in m or m[required] == "":
            self.pr_dbg("Missing %s" % required)
            self.pr_dbg("Full %s" % m)
            return False
    if 'doc_values' not in m or m['doc_values'] == "":
        # Only internal fields may omit doc_values; default it to False.
        if not m['name'].startswith('_'):
            self.pr_dbg("Missing %s" % "doc_values")
            return False
        m['doc_values'] = False
    return True
Assert minimum set of fields in cache, does not validate contents
def eval(e, amplitude, e_0, alpha, e_cutoff, beta):
    """One dimensional power law with an exponential cutoff.

    Evaluates ``amplitude * (e / e_0)**(-alpha) * exp(-(e / e_cutoff)**beta)``.
    """
    power_law = amplitude * (e / e_0) ** (-alpha)
    cutoff = np.exp(-((e / e_cutoff) ** beta))
    return power_law * cutoff
One dimensional power law with an exponential cutoff model function
def pop(self, k, d=_POP_DEFAULT):
    """Pop an ingredient off of this shelf.

    With no default supplied, a missing key raises KeyError, mirroring
    ``dict.pop`` semantics.
    """
    if d is _POP_DEFAULT:
        return self._ingredients.pop(k)
    return self._ingredients.pop(k, d)
Pop an ingredient off of this shelf.
def get(self, r):
    """Return the precomputed value of the given operand expression.

    Handles None, the stack top for '(sp)', memory dereferences written
    as '(addr)', numeric literals, and register names; returns None for
    anything else.
    """
    if r is None:
        return None
    if r.lower() == '(sp)' and self.stack:
        return self.stack[-1]
    if r[:1] == '(':
        # Memory dereference: strip the surrounding parentheses.
        return self.mem[r[1:-1]]
    lowered = r.lower()
    if is_number(lowered):
        return str(valnum(lowered))
    if not is_register(lowered):
        return None
    return self.regs[lowered]
Returns precomputed value of the given expression
def target(self):
    """The "target", i.e. local part of the URL: the path plus, when
    present, the query string."""
    base = self.path if self.path else '/'
    if not self.query:
        return base
    return '{}?{}'.format(base, self.query)
The "target" i.e. local part of the URL, consisting of the path and query.
def query_mxrecords(self):
    """Look up the MX DNS records of the recipient's SMTP server.

    Returns the exchange host names as a list of strings.
    """
    import dns.resolver
    logging.info('Resolving DNS query...')
    records = dns.resolver.query(self.domain, 'MX')
    addresses = [record.exchange.to_text() for record in records]
    summary = '\n '.join(addresses)
    logging.info('{} records found:\n{}'.format(len(addresses), summary))
    return addresses
Looks up for the MX DNS records of the recipient SMTP server
def _encode(self):
    """Encode the message and return it as a bytestring.

    Messages without declared ``__fields__`` encode to an empty buffer.
    """
    buf = ByteBuffer()
    for field in getattr(self, '__fields__', ()):
        field.encode(self, buf)
    return buf.tostring()
Encode the message and return a bytestring.
def eye(root=None, zodb_uri=None, port=8080):
    """Serve a WSGI app to browse objects based on a root object or ZODB URI.

    Exactly one of ``root`` (an arbitrary Python object) or ``zodb_uri``
    must be given; otherwise RuntimeError is raised. Listens on
    127.0.0.1:``port`` and blocks. Setting the DEBUG environment variable
    wraps the app in post-mortem debugging middleware.
    """
    if root is not None:
        root_factory = lambda request: Node(root)
    elif zodb_uri is not None:
        # Bare filesystem paths are treated as file:// storages.
        if '://' not in zodb_uri:
            zodb_uri = 'file://' + os.path.abspath(zodb_uri)
        from repoze.zodbconn.finder import PersistentApplicationFinder
        finder = PersistentApplicationFinder(zodb_uri, appmaker=lambda root: Node(root))
        root_factory = lambda request: finder(request.environ)
    else:
        raise RuntimeError("Must specify root object or ZODB URI.")
    app = Eye(root_factory)
    if 'DEBUG' in os.environ:
        from repoze.debug.pdbpm import PostMortemDebug
        app = PostMortemDebug(app)
    serve(app, host='127.0.0.1', port=port)
Serves a WSGI app to browse objects based on a root object or ZODB URI.
def postprocess_citedReferences(self, entry):
    """Ensure ``entry.citedReferences`` is a list.

    A single cited reference is parsed as a scalar; wrap it so downstream
    code can always iterate. (Exact ``type`` check kept deliberately —
    list subclasses would behave differently under ``isinstance``.)
    """
    if type(entry.citedReferences) is list:
        return
    entry.citedReferences = [entry.citedReferences]
If only a single cited reference was found, ensure that ``citedReferences`` is nonetheless a list.
def get_status(self, device_id):
    """Return the status of the garage door device with the given id.

    Returns:
        The matching device's ``status`` value, or False when the device
        list could not be fetched or no device matches ``device_id``.
    """
    devices = self.get_devices()
    # get_devices() signals failure with False; test identity explicitly
    # instead of the original `devices != False` equality check.
    if devices is False:
        return False
    for device in devices:
        if device['door'] == device_id:
            return device['status']
    return False
Return the status of the MyQ garage door device matching device_id, or False when the device list is unavailable or no device matches.
def binarize(self, threshold=0):
    """Binarize the pianoroll in place.

    Parameters
    ----------
    threshold : int or float
        Values strictly above this threshold become True. Defaults to zero.

    Already-binarized pianorolls are left untouched.
    """
    if self.is_binarized():
        return
    self.pianoroll = self.pianoroll > threshold
Binarize the pianoroll. Parameters ---------- threshold : int or float A threshold used to binarize the pianorolls. Defaults to zero.
def _parse(value, strict=True):
    """Parse a duration string of the form ``[HH:]MM:SS``.

    With strict=True (the default), ``check_tuple`` raises StrictnessError
    when minutes or seconds exceed their allowed ranges.

    Returns a ``(hours, minutes, seconds)`` tuple; raises ValueError for
    input that does not match the pattern.
    """
    pattern = r'(?:(?P<hours>\d+):)?(?P<minutes>\d+):(?P<seconds>\d+)'
    match = re.match(pattern, value)
    if match is None:
        raise ValueError('Invalid duration value: %s' % value)
    parts = tuple(safe_int(match.group(name)) for name in ('hours', 'minutes', 'seconds'))
    check_tuple(parts, strict)
    return parts
Preliminary duration value parser strict=True (by default) raises StrictnessError if either hours, minutes or seconds in duration value exceed allowed values
def _set_magic_constants(self):
    """Add the magic constants __FILE__, __ROUTINE__ and __DIR__ to the
    replacement map, each wrapped in single quotes."""
    real_path = os.path.realpath(self._source_filename)
    self._replace['__FILE__'] = "'{}'".format(real_path)
    self._replace['__ROUTINE__'] = "'{}'".format(self._routine_name)
    self._replace['__DIR__'] = "'{}'".format(os.path.dirname(real_path))
Adds magic constants to replace list.
def load_files(filenames,multiproc=False,**kwargs):
    """Load a set of FITS files and concatenate the resulting arrays.

    Parameters
    ----------
    filenames : str or sequence of str
        File(s) to load; a scalar is promoted to a 1-element array.
    multiproc : False or int
        If truthy, load with a multiprocessing Pool; a positive value
        sets the number of worker processes (otherwise the default).
    **kwargs :
        Passed through to ``load_file`` for every file.

    Returns
    -------
    The concatenated array; inputs whose dtype differs from the first
    file's are cast to it (with a warning).
    """
    filenames = np.atleast_1d(filenames)
    logger.debug("Loading %s files..."%len(filenames))
    kwargs = [dict(filename=f,**kwargs) for f in filenames]
    if multiproc:
        from multiprocessing import Pool
        processes = multiproc if multiproc > 0 else None
        # maxtasksperchild=1 keeps worker memory bounded for large files.
        p = Pool(processes,maxtasksperchild=1)
        out = p.map(load_file,kwargs)
    else:
        out = [load_file(kw) for kw in kwargs]
    # Harmonize dtypes against the first loaded array before concatenating.
    dtype = out[0].dtype
    for i,d in enumerate(out):
        if d.dtype != dtype:
            logger.warn("Casting input data to same type.")
            out[i] = d.astype(dtype,copy=False)
    logger.debug('Concatenating arrays...')
    return np.concatenate(out)
Load a set of FITS files with kwargs.
def _should_split_cell(cls, cell_text: str) -> bool:
    """Return True when the cell contains a separator (', ', newline or
    '/') and should therefore be split into parts."""
    separators = (', ', '\n', '/')
    return any(separator in cell_text for separator in separators)
Checks whether the cell should be split. We're just doing the same thing that SEMPRE did here.
def _input_stmt(self, stmt: Statement, sctx: SchemaContext) -> None:
    """Handle an RPC or action ``input`` statement by delegating its
    substatements to this node's ``input`` child."""
    input_child = self.get_child("input")
    input_child._handle_substatements(stmt, sctx)
Handle RPC or action input statement.
def on_invite(self, connection, event):
    """Handle a channel invitation, joining the channel when we are the
    invitee and merely logging it otherwise."""
    sender = self.get_nick(event.source)
    invited = self.get_nick(event.target)
    channel = event.arguments[0]
    if invited != self._nickname:
        logging.info(">> %s invited %s to %s", sender, invited, channel)
        return
    logging.info("! I am invited to %s by %s", channel, sender)
    connection.join(channel)
Got an invitation to a channel
def RegisterPlugin(self, report_plugin_cls):
    """Register a report plugin for use in the GRR UI.

    The plugin is keyed by its class name.

    Raises:
        RuntimeError: if a plugin with the same class name is already
            registered (including registering the same plugin twice).
    """
    name = report_plugin_cls.__name__
    if name in self.plugins:
        raise RuntimeError("Can't register two report plugins with the same "
                           "name. In particular, can't register the same "
                           "report plugin twice: %r" % name)
    self.plugins[name] = report_plugin_cls
Registers a report plugin for use in the GRR UI.
def get_uninvoiced_hours(entries, billable=None):
    """Return the total un-invoiced hours as a 2-decimal string.

    Entries whose status is 'invoiced' or 'not-invoiced' are excluded.
    If ``billable`` is 'billable' or 'nonbillable', only entries whose
    activity matches that flag are counted.
    """
    excluded_statuses = ('invoiced', 'not-invoiced')
    if billable is not None:
        want_billable = billable.lower() == u'billable'
        entries = [entry for entry in entries
                   if entry.activity.billable == want_billable]
    total = sum(entry.hours for entry in entries
                if entry.status not in excluded_statuses)
    return '{0:.2f}'.format(total)
Given an iterable of entries, return the total hours that have not been invoiced. If billable is passed as 'billable' or 'nonbillable', limit to the corresponding entries.
def format_output(self, rendered_widgets):
    """Render all widgets grouped inside an unordered list.

    Each field becomes an <li> with its label, rendered widget and —
    when the field has help text — the formatted help text.
    """
    items = [u'<ul class="formfield">']
    for index, field in enumerate(self.fields):
        label = self.format_label(field, index)
        help_text = self.format_help_text(field, index)
        items.append(u'<li>%s %s %s</li>' % (
            label, rendered_widgets[index], field.help_text and help_text))
    items.append(u'</ul>')
    return ''.join(items)
This output will yeild all widgets grouped in a un-ordered list
def resource_urls(request):
    """Provide global values (app and search metadata) to templates."""
    parsed = urlparse(settings.SEARCH_URL)
    search_ip = '%s://%s:%s' % (parsed.scheme, parsed.hostname, parsed.port)
    return dict(
        APP_NAME=__description__,
        APP_VERSION=__version__,
        SITE_URL=settings.SITE_URL.rstrip('/'),
        SEARCH_TYPE=settings.SEARCH_TYPE,
        SEARCH_URL=settings.SEARCH_URL,
        SEARCH_IP=search_ip,
    )
Global values to pass to templates
def list_dvportgroups(dvs=None, portgroup_names=None, service_instance=None):
    """Return a list of distributed virtual switch portgroup dicts.

    dvs
        Name of the DVS containing the portgroups; when given, only that
        switch's portgroups are considered. Default None.
    portgroup_names
        List of portgroup names to look for. If None, all portgroups are
        returned. Default None.
    service_instance
        Service instance (vim.ServiceInstance) of the vCenter. Default None.
    """
    ret_dict = []
    proxy_type = get_proxy_type()
    # Resolve the datacenter reference according to the proxy flavor.
    if proxy_type == 'esxdatacenter':
        datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
        dc_ref = _get_proxy_target(service_instance)
    elif proxy_type == 'esxcluster':
        datacenter = __salt__['esxcluster.get_details']()['datacenter']
        dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if dvs:
        dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
        if not dvs_refs:
            raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
                                             'retrieved'.format(dvs))
        dvs_ref = dvs_refs[0]
    # No explicit names means fetch every portgroup.
    get_all_portgroups = True if not portgroup_names else False
    for pg_ref in salt.utils.vmware.get_dvportgroups(
            parent_ref=dvs_ref if dvs else dc_ref,
            portgroup_names=portgroup_names,
            get_all_portgroups=get_all_portgroups):
        ret_dict.append(_get_dvportgroup_dict(pg_ref))
    return ret_dict
Returns a list of distributed virtual switch portgroups. The list can be filtered by the portgroup names or by the DVS. dvs Name of the DVS containing the portgroups. Default value is None. portgroup_names List of portgroup names to look for. If None, all portgroups are returned. Default value is None service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.list_dvporgroups salt '*' vsphere.list_dvportgroups dvs=dvs1 salt '*' vsphere.list_dvportgroups portgroup_names=[pg1] salt '*' vsphere.list_dvportgroups dvs=dvs1 portgroup_names=[pg1]
def get_kwargs(self):
    """Return a dict of attached attributes, excluding ignored names."""
    ignored = self._ignored
    return {name: value for name, value in vars(self).items()
            if name not in ignored}
Return kwargs from attached attributes.
def is_transaction_expired(transaction, block_number):
    """True if the transaction cannot be mined because it has expired.

    Update-transfer and secret-reveal transactions carry an expiration
    block; once the chain has moved past it the transaction can no longer
    be executed successfully.
    """
    if isinstance(transaction, ContractSendChannelUpdateTransfer):
        if transaction.expiration < block_number:
            return True
    if isinstance(transaction, ContractSendSecretReveal):
        if transaction.expiration < block_number:
            return True
    return False
True if transaction cannot be mined because it has expired. Some transactions are time dependent, e.g. the secret registration must be done before the lock expiration, and the update transfer must be done before the settlement window is over. If the current block is higher than any of these expirations blocks, the transaction is expired and cannot be successfully executed.
def parse(self, file_name):
    """Parse the entire file at ``file_name`` and return the parsed object.

    :param file_name: path of the UTF-8 encoded file to parse
    :return: the freshly parsed object
    """
    self.object = self.parsed_class()
    with open(file_name, encoding='utf-8') as source:
        content = source.read()
    self.parse_str(content)
    return self.object
Parse entire file and return relevant object. :param file_name: File path :type file_name: str :return: Parsed object